Posted to commits@hive.apache.org by se...@apache.org on 2018/06/18 22:03:15 UTC

[24/67] [abbrv] hive git commit: HIVE-19629: Enable Decimal64 reader after orc version upgrade (Prasanth Jayachandran reviewed by Matt McCline)
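
This change lets the vectorized ORC reader deliver short decimals (precision <= 18) as scaled longs ("Decimal64") instead of full HiveDecimal objects. That is why every decimal(18,9) column in the plans below is now annotated with /DECIMAL_64, why the Map Vectorization feature lists change from [] to [DECIMAL_64], and why cast expressions such as CastDecimalToBoolean gain a ConvertDecimal64ToDecimal child: the cast has no Decimal64 form, so a regular decimal(18,9) scratch column is materialized first, which shifts the projected output column from 2 to 3 and adds the extra decimal(18,9) entry to scratchColumnTypeNames.

A minimal sketch of the kind of query behind one hunk of this golden-file diff, assuming the table and column names (decimal_2, t) implied by vector_decimal_2.q.out; the actual .q file settings and data load are not reproduced here:

    -- hypothetical repro, not the verbatim test file
    CREATE TABLE decimal_2 (t decimal(18,9)) STORED AS ORC;
    SET hive.vectorized.execution.enabled=true;
    -- With the Decimal64 reader on, the plan reports
    -- 0:t:decimal(18,9)/DECIMAL_64 plus a ConvertDecimal64ToDecimal step:
    EXPLAIN VECTORIZATION DETAIL
    SELECT CAST(t AS boolean) FROM decimal_2 ORDER BY 1;

The remaining hunks follow the same pattern for the other casts in the test (tinyint, smallint, int, bigint, float, double, string), differing only in the cast expression and the scratch column type.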

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
index 8e02351..8f0cc4d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_2.q.out
@@ -50,22 +50,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToBoolean(t) (type: boolean)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToBoolean(col 0:decimal(18,9)) -> 2:boolean
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToBoolean(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:boolean
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: boolean)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -75,8 +75,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -84,9 +84,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -166,22 +166,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToByte(t) (type: tinyint)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:tinyint
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:tinyint
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: tinyint)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -191,8 +191,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -200,9 +200,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -282,22 +282,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToShort(t) (type: smallint)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:smallint
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:smallint
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: smallint)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -307,8 +307,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -316,9 +316,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -398,22 +398,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToInteger(t) (type: int)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:int
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:int
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -423,8 +423,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -432,9 +432,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -514,22 +514,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToLong(t) (type: bigint)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:bigint
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:bigint
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: bigint)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -539,8 +539,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -548,9 +548,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -630,22 +630,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToFloat(t) (type: float)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToFloat(col 0:decimal(18,9)) -> 2:float
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToFloat(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:float
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: float)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -655,8 +655,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -664,9 +664,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [double]
+                    scratchColumnTypeNames: [decimal(18,9), double]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -746,22 +746,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToDouble(t) (type: double)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToDouble(col 0:decimal(18,9)) -> 2:double
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToDouble(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:double
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: double)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -771,8 +771,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -780,9 +780,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [double]
+                    scratchColumnTypeNames: [decimal(18,9), double]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -862,22 +862,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToString(t) (type: string)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToString(col 0:decimal(18,9)) -> 2:string
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToString(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:string
                     Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -887,8 +887,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -896,9 +896,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [string]
+                    scratchColumnTypeNames: [decimal(18,9), string]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -989,22 +989,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToBoolean(t) (type: boolean)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToBoolean(col 0:decimal(18,9)) -> 2:boolean
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToBoolean(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:boolean
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: boolean)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -1014,8 +1014,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -1023,9 +1023,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -1105,22 +1105,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToByte(t) (type: tinyint)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:tinyint
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:tinyint
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: tinyint)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -1130,8 +1130,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -1139,9 +1139,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -1221,22 +1221,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToShort(t) (type: smallint)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:smallint
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:smallint
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: smallint)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -1246,8 +1246,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -1255,9 +1255,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -1337,22 +1337,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToInteger(t) (type: int)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:int
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:int
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: int)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -1362,8 +1362,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -1371,9 +1371,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -1453,22 +1453,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToLong(t) (type: bigint)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToLong(col 0:decimal(18,9)) -> 2:bigint
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToLong(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:bigint
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: bigint)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -1478,8 +1478,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -1487,9 +1487,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [bigint]
+                    scratchColumnTypeNames: [decimal(18,9), bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -1569,22 +1569,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToFloat(t) (type: float)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToFloat(col 0:decimal(18,9)) -> 2:float
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToFloat(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:float
                     Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: float)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -1594,8 +1594,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -1603,9 +1603,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [double]
+                    scratchColumnTypeNames: [decimal(18,9), double]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -1685,22 +1685,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToDouble(t) (type: double)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToDouble(col 0:decimal(18,9)) -> 2:double
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToDouble(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:double
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: double)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -1710,8 +1710,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -1719,9 +1719,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [double]
+                    scratchColumnTypeNames: [decimal(18,9), double]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -1801,22 +1801,22 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: UDFToString(t) (type: string)
                     outputColumnNames: _col0
                     Select Vectorization:
                         className: VectorSelectOperator
                         native: true
-                        projectedOutputColumnNums: [2]
-                        selectExpressions: CastDecimalToString(col 0:decimal(18,9)) -> 2:string
+                        projectedOutputColumnNums: [3]
+                        selectExpressions: CastDecimalToString(col 2:decimal(18,9))(children: ConvertDecimal64ToDecimal(col 0:decimal(18,9)/DECIMAL_64) -> 2:decimal(18,9)) -> 3:string
                     Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Reduce Sink Vectorization:
                           className: VectorReduceSinkObjectHashOperator
-                          keyColumnNums: [2]
+                          keyColumnNums: [3]
                           native: true
                           nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
                           valueColumnNums: []
@@ -1826,8 +1826,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -1835,9 +1835,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: [0]
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [string]
+                    scratchColumnTypeNames: [decimal(18,9), string]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:
@@ -1914,7 +1914,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 3.14 (type: decimal(4,2))
                     outputColumnNames: _col0
@@ -1939,8 +1939,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -1948,7 +1948,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(4,2)]
 
@@ -1993,7 +1993,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 3.14 (type: decimal(4,2))
                     outputColumnNames: _col0
@@ -2018,8 +2018,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2027,7 +2027,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(4,2)]
 
@@ -2072,7 +2072,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 1355944339.1234567 (type: decimal(30,8))
                     outputColumnNames: _col0
@@ -2097,8 +2097,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2106,7 +2106,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(30,8)]
 
@@ -2151,7 +2151,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 1 (type: decimal(10,0))
                     outputColumnNames: _col0
@@ -2176,8 +2176,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2185,7 +2185,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(10,0)]
 
@@ -2221,7 +2221,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 1 (type: decimal(10,0))
                     outputColumnNames: _col0
@@ -2246,8 +2246,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2255,7 +2255,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(10,0)]
 
@@ -2300,7 +2300,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 3 (type: decimal(10,0))
                     outputColumnNames: _col0
@@ -2325,8 +2325,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2334,7 +2334,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(10,0)]
 
@@ -2379,7 +2379,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 3 (type: decimal(10,0))
                     outputColumnNames: _col0
@@ -2404,8 +2404,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2413,7 +2413,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(10,0)]
 
@@ -2458,7 +2458,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 3 (type: decimal(10,0))
                     outputColumnNames: _col0
@@ -2483,8 +2483,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2492,7 +2492,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(10,0)]
 
@@ -2537,7 +2537,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 3 (type: decimal(10,0))
                     outputColumnNames: _col0
@@ -2562,8 +2562,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2571,7 +2571,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(10,0)]
 
@@ -2616,7 +2616,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 1 (type: decimal(20,19))
                     outputColumnNames: _col0
@@ -2641,8 +2641,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2650,7 +2650,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(20,19)]
 
@@ -2695,7 +2695,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:t:decimal(18,9), 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:t:decimal(18,9)/DECIMAL_64, 1:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: 0.99999999999999999999 (type: decimal(20,20))
                     outputColumnNames: _col0
@@ -2720,8 +2720,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: false
                 usesVectorUDFAdaptor: false
@@ -2729,7 +2729,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 1
                     includeColumns: []
-                    dataColumns: t:decimal(18,9)
+                    dataColumns: t:decimal(18,9)/DECIMAL_64
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(20,20)]
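
The recurring change in this file is the pair of feature flags flipping from [] to [DECIMAL_64] and the schema columns gaining a /DECIMAL_64 suffix. As a rough illustration of what that annotation means (assuming the usual scaled-long encoding ORC applies to decimals with precision <= 18; the class below is a standalone sketch, not Hive's actual vectorization code):

    // Hypothetical sketch: a DECIMAL_64 column carries each value as an
    // unscaled long plus a fixed scale, so vectorized operators can use
    // plain long arithmetic instead of HiveDecimal objects.
    import java.math.BigDecimal;

    public class Decimal64Sketch {
        public static void main(String[] args) {
            // decimal(18,9): 1.234567891 is stored as the unscaled long 1234567891.
            long unscaled = 1_234_567_891L;
            int scale = 9;

            // What ConvertDecimal64ToDecimal does conceptually: reapply the
            // scale to materialize a full decimal for operators that need one.
            BigDecimal full = BigDecimal.valueOf(unscaled, scale);
            System.out.println(full); // 1.234567891

            // A scalar operand can be pre-scaled once at plan time, so
            // "col + 5.5" at scale 5 becomes a single long addition:
            // 5.5 -> 550000 (compare the decimal64Val shown in the
            // Decimal64ColAddDecimal64Scalar call further down).
            long scalarAtScale5 = new BigDecimal("5.5").movePointRight(5).longValueExact();
            System.out.println(scalarAtScale5); // 550000
        }
    }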
 

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out
index 0bfd12e..5bea214 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_5.q.out
@@ -129,6 +129,44 @@ NULL
 124.00000
 125.20000
 200.00000
+PREHOOK: query: explain SELECT cast(key as decimal) FROM DECIMAL_5
+PREHOOK: type: QUERY
+POSTHOOK: query: explain SELECT cast(key as decimal) FROM DECIMAL_5
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_5
+                  Statistics: Num rows: 38 Data size: 4032 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: CAST( key AS decimal(10,0)) (type: decimal(10,0))
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 38 Data size: 4032 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 38 Data size: 4032 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_5
@@ -137,42 +175,42 @@ POSTHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_5
 #### A masked pattern was here ####
--4400
+-440000000
 NULL
 0
 0
-100
-10
-1
-0
-0
-200
-20
-2
-0
-0
+10000000
+1000000
+100000
+10000
+1000
+20000000
+2000000
+200000
 0
-0
-0
-0
-0
-0
-0
-1
-2
-3
--1
--1
--1
-1
-1
-124
-125
--1255
-3
-3
-3
-1
+20000
+2000
+30000
+33000
+33300
+-30000
+-33000
+-33300
+100000
+200000
+314000
+-112000
+-112000
+-112200
+112000
+112200
+12400000
+12520000
+-125549000
+314000
+314000
+314000
+100000
 NULL
 NULL
 PREHOOK: query: SELECT cast(key as decimal(6,3)) FROM DECIMAL_5
@@ -187,38 +225,38 @@ NULL
 NULL
 0.000
 0.000
+NULL
+NULL
 100.000
 10.000
 1.000
-0.100
-0.010
+NULL
+NULL
 200.000
-20.000
-2.000
 0.000
-0.200
-0.020
-0.300
-0.330
-0.333
--0.300
--0.330
--0.333
-1.000
+20.000
 2.000
-3.140
--1.120
--1.120
--1.122
-1.120
-1.122
-124.000
-125.200
-NULL
-3.140
-3.140
-3.140
-1.000
+30.000
+33.000
+33.300
+-30.000
+-33.000
+-33.300
+100.000
+200.000
+314.000
+-112.000
+-112.000
+-112.200
+112.000
+112.200
+NULL
+NULL
+NULL
+314.000
+314.000
+314.000
+100.000
 NULL
 NULL
 PREHOOK: query: DROP TABLE DECIMAL_5_txt

http://git-wip-us.apache.org/repos/asf/hive/blob/dd512593/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
index 800a4ae..705bf8b 100644
--- a/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_decimal_6.q.out
@@ -135,7 +135,7 @@ STAGE PLANS:
                   Statistics: Num rows: 27 Data size: 2684 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:decimal(10,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:decimal(10,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: key (type: decimal(10,5)), value (type: int)
                     outputColumnNames: _col0, _col1
@@ -159,8 +159,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -168,7 +168,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 2
                     includeColumns: [0, 1]
-                    dataColumns: key:decimal(10,5), value:int
+                    dataColumns: key:decimal(10,5)/DECIMAL_64, value:int
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
         Reducer 2 
@@ -276,7 +276,7 @@ STAGE PLANS:
                   Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:decimal(17,4), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:decimal(17,4)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: key (type: decimal(17,4)), value (type: int)
                     outputColumnNames: _col0, _col1
@@ -300,8 +300,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -309,7 +309,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 2
                     includeColumns: [0, 1]
-                    dataColumns: key:decimal(17,4), value:int
+                    dataColumns: key:decimal(17,4)/DECIMAL_64, value:int
                     partitionColumnCount: 0
                     scratchColumnTypeNames: []
         Reducer 2 
@@ -427,7 +427,7 @@ STAGE PLANS:
                   Statistics: Num rows: 27 Data size: 2576 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:decimal(10,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:decimal(10,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: CAST( key AS decimal(18,5)) (type: decimal(18,5))
                     outputColumnNames: _col0
@@ -435,7 +435,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumnNums: [3]
-                        selectExpressions: CastDecimalToDecimal(col 0:decimal(10,5)) -> 3:decimal(18,5)
+                        selectExpressions: ConvertDecimal64ToDecimal(col 0:decimal(18,5)/DECIMAL_64) -> 3:decimal(18,5)
                     Statistics: Num rows: 27 Data size: 2576 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: decimal(18,5))
@@ -452,8 +452,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -461,7 +461,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 2
                     includeColumns: [0]
-                    dataColumns: key:decimal(10,5), value:int
+                    dataColumns: key:decimal(10,5)/DECIMAL_64, value:int
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(18,5)]
         Map 4 
@@ -471,7 +471,7 @@ STAGE PLANS:
                   Statistics: Num rows: 27 Data size: 3024 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:decimal(17,4), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:decimal(17,4)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: CAST( key AS decimal(18,5)) (type: decimal(18,5))
                     outputColumnNames: _col0
@@ -479,7 +479,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumnNums: [3]
-                        selectExpressions: CastDecimalToDecimal(col 0:decimal(17,4)) -> 3:decimal(18,5)
+                        selectExpressions: ConvertDecimal64ToDecimal(col 0:decimal(18,5)/DECIMAL_64) -> 3:decimal(18,5)
                     Statistics: Num rows: 27 Data size: 3024 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col0 (type: decimal(18,5))
@@ -496,8 +496,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -505,7 +505,7 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 2
                     includeColumns: [0]
-                    dataColumns: key:decimal(17,4), value:int
+                    dataColumns: key:decimal(17,4)/DECIMAL_64, value:int
                     partitionColumnCount: 0
                     scratchColumnTypeNames: [decimal(18,5)]
         Reducer 3 
@@ -575,54 +575,54 @@ NULL
 NULL
 NULL
 NULL
--1234567890.12350
+-123456789.01235
 -4400.00000
--4400.00000
--1255.49000
 -1255.49000
--1.12200
+-440.00000
+-125.54900
 -1.12200
 -1.12000
--1.12000
--0.33300
 -0.33300
 -0.30000
--0.30000
+-0.11220
+-0.11200
+-0.03330
+-0.03000
 0.00000
 0.00000
 0.00000
 0.00000
-0.33300
+0.03330
+0.10000
+0.10000
+0.11200
+0.11220
+0.20000
+0.31400
+0.31400
+0.31400
 0.33300
 1.00000
 1.00000
 1.00000
-1.00000
-1.12000
+1.07343
 1.12000
 1.12200
-1.12200
-2.00000
 2.00000
 3.14000
 3.14000
 3.14000
-3.14000
-3.14000
-3.14000
-10.00000
 10.00000
-10.73430
 10.73433
+12.40000
+12.52000
 124.00000
-124.00000
-125.20000
 125.20000
+2323.22344
 23232.23435
-23232.23440
-2389432.23750
-2389432.23750
-1234567890.12350
+238943.22375
+238943.22375
+123456789.01235
 PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
 CREATE TABLE DECIMAL_6_3 STORED AS ORC AS SELECT key + 5.5 AS k, value * 11 AS v from DECIMAL_6_1 ORDER BY v
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -655,7 +655,7 @@ STAGE PLANS:
                   Statistics: Num rows: 27 Data size: 2684 Basic stats: COMPLETE Column stats: COMPLETE
                   TableScan Vectorization:
                       native: true
-                      vectorizationSchemaColumns: [0:key:decimal(10,5), 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
+                      vectorizationSchemaColumns: [0:key:decimal(10,5)/DECIMAL_64, 1:value:int, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: (key + 5.5) (type: decimal(11,5)), (value * 11) (type: int)
                     outputColumnNames: _col0, _col1
@@ -663,7 +663,7 @@ STAGE PLANS:
                         className: VectorSelectOperator
                         native: true
                         projectedOutputColumnNums: [3, 4]
-                        selectExpressions: DecimalColAddDecimalScalar(col 0:decimal(10,5), val 5.5) -> 3:decimal(11,5), LongColMultiplyLongScalar(col 1:int, val 11) -> 4:int
+                        selectExpressions: Decimal64ColAddDecimal64Scalar(col 0:decimal(10,5)/DECIMAL_64, decimal64Val 550000, decimalVal 5.5) -> 3:decimal(11,5)/DECIMAL_64, LongColMultiplyLongScalar(col 1:int, val 11) -> 4:int
                     Statistics: Num rows: 27 Data size: 3132 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: _col1 (type: int)
@@ -681,8 +681,8 @@ STAGE PLANS:
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
-                inputFormatFeatureSupport: []
-                featureSupportInUse: []
+                inputFormatFeatureSupport: [DECIMAL_64]
+                featureSupportInUse: [DECIMAL_64]
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                 allNative: true
                 usesVectorUDFAdaptor: false
@@ -690,9 +690,9 @@ STAGE PLANS:
                 rowBatchContext:
                     dataColumnCount: 2
                     includeColumns: [0, 1]
-                    dataColumns: key:decimal(10,5), value:int
+                    dataColumns: key:decimal(10,5)/DECIMAL_64, value:int
                     partitionColumnCount: 0
-                    scratchColumnTypeNames: [decimal(11,5), bigint]
+                    scratchColumnTypeNames: [decimal(11,5)/DECIMAL_64, bigint]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization: