You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by xu...@apache.org on 2018/02/06 03:36:38 UTC

[1/2] hive git commit: HIVE-18048: Vectorization: Support Struct type with vectorization (Colin Ma, reviewed by Ferdinand Xu)

Repository: hive
Updated Branches:
  refs/heads/master 1bcc88f15 -> ac721836f


http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out b/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out
new file mode 100644
index 0000000..7249363
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/parquet_struct_type_vectorization.q.out
@@ -0,0 +1,535 @@
+PREHOOK: query: DROP TABLE parquet_struct_type_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_struct_type_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE parquet_struct_type
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_struct_type
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE parquet_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ','
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_struct_type_staging
+POSTHOOK: query: CREATE TABLE parquet_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ','
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_struct_type_staging
+PREHOOK: query: CREATE TABLE parquet_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS PARQUET
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_struct_type
+POSTHOOK: query: CREATE TABLE parquet_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS PARQUET
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_struct_type
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE parquet_struct_type_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@parquet_struct_type_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE parquet_struct_type_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@parquet_struct_type_staging
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1024
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type_staging
+PREHOOK: Output: default@parquet_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1024
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type_staging
+POSTHOOK: Output: default@parquet_struct_type
+POSTHOOK: Lineage: parquet_struct_type.id SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st1 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st2 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from parquet_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1023
+PREHOOK: query: explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: parquet_struct_type
+                  Statistics: Num rows: 1023 Data size: 459256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Select Operator
+                    expressions: st1 (type: struct<f1:int,f2:string>), st1.f1 (type: int), st1.f2 (type: string), st2 (type: struct<f1:int,f3:string>), st2.f1 (type: int), st2.f3 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [1, 4, 5, 2, 6, 7]
+                        selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int, VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 1:int) -> 5:string, VectorUDFStructField(col 2:struct<f1:int,f3:string>, col 0:int) -> 6:int, VectorUDFStructField(col 2:struct<f1:int,f3:string>, col 1:int) -> 7:string
+                    Statistics: Num rows: 1023 Data size: 459256 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 10
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
+                      Statistics: Num rows: 10 Data size: 4480 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
+                        Statistics: Num rows: 10 Data size: 4480 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs (cache only)
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: explain vectorization expression select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: parquet_struct_type
+                  Pruned Column Paths: st1.f1
+                  Statistics: Num rows: 1023 Data size: 229628 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterLongColGreaterLongScalar(col 4:int, val 500)(children: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int)
+                    predicate: (st1.f1 > 500) (type: boolean)
+                    Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: st1.f1 (type: int)
+                      outputColumnNames: _col0
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [4]
+                          selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int
+                      Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: sum(_col0)
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(col 4:int) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 4:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkLongOperator
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs (cache only)
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                Group By Vectorization:
+                    aggregators: VectorUDAFSumLong(col 1:bigint) -> bigint
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:int
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: [0]
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 170 Data size: 38158 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: bigint), _col0 (type: int)
+                  outputColumnNames: _col0, _col1
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [1, 0]
+                  Statistics: Num rows: 170 Data size: 38158 Basic stats: COMPLETE Column stats: NONE
+                  Limit
+                    Number of rows: 10
+                    Limit Vectorization:
+                        className: VectorLimitOperator
+                        native: true
+                    Statistics: Num rows: 10 Data size: 2240 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 10 Data size: 2240 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1025
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type_staging
+PREHOOK: Output: default@parquet_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1025
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type_staging
+POSTHOOK: Output: default@parquet_struct_type
+POSTHOOK: Lineage: parquet_struct_type.id SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st1 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st2 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from parquet_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1024
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1026
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type_staging
+PREHOOK: Output: default@parquet_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1026
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type_staging
+POSTHOOK: Output: default@parquet_struct_type
+POSTHOOK: Lineage: parquet_struct_type.id SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st1 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st2 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from parquet_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1025
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out b/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
index afdfa62..d211358 100644
--- a/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_complex_all.q.out
@@ -584,25 +584,45 @@ STAGE PLANS:
                   alias: orc_create_complex
                   Pruned Column Paths: strct.b
                   Statistics: Num rows: 3 Data size: 1800 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:str:string, 1:mp:map<string,string>, 2:lst:array<string>, 3:strct:struct<a:string,b:string>, 4:val:string, 5:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: strct.b (type: string), str (type: string)
                     outputColumnNames: _col0, _col1
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [6, 0]
+                        selectExpressions: VectorUDFStructField(col 3:struct<a:string,b:string>, col 1:int) -> 6:string
                     Statistics: Num rows: 3 Data size: 1800 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
                       Statistics: Num rows: 3 Data size: 1800 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: SELECT operator: Could not vectorize expression (mode = PROJECTION): Column[strct].b
-                vectorized: false
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 5
+                    includeColumns: [0, 3]
+                    dataColumns: str:string, mp:map<string,string>, lst:array<string>, strct:struct<a:string,b:string>, val:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [string]
 
   Stage: Stage-0
     Fetch Operator
@@ -1265,12 +1285,28 @@ STAGE PLANS:
                   alias: orc_create_complex
                   Pruned Column Paths: strct.b
                   Statistics: Num rows: 13503 Data size: 7697400 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                      vectorizationSchemaColumns: [0:str:string, 1:mp:map<string,string>, 2:lst:array<string>, 3:strct:struct<a:string,b:string>, 4:val:string, 5:ROW__ID:struct<transactionid:bigint,bucketid:int,rowid:bigint>]
                   Select Operator
                     expressions: strct.b (type: string), val (type: string)
                     outputColumnNames: _col0, _col1
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [6, 4]
+                        selectExpressions: VectorUDFStructField(col 3:struct<a:string,b:string>, col 1:int) -> 6:string
                     Statistics: Num rows: 13503 Data size: 7697400 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(_col1)
+                      Group By Vectorization:
+                          aggregators: VectorUDAFCount(col 4:string) -> bigint
+                          className: VectorGroupByOperator
+                          groupByMode: HASH
+                          keyExpressions: col 6:string
+                          native: false
+                          vectorProcessingMode: HASH
+                          projectedOutputColumnNums: [0]
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
@@ -1279,16 +1315,31 @@ STAGE PLANS:
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
+                        Reduce Sink Vectorization:
+                            className: VectorReduceSinkStringOperator
+                            keyColumnNums: [0]
+                            native: true
+                            nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                            valueColumnNums: [1]
                         Statistics: Num rows: 13503 Data size: 7697400 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
-            Execution mode: llap
+            Execution mode: vectorized, llap
             LLAP IO: all inputs
             Map Vectorization:
                 enabled: true
                 enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
                 inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
-                notVectorizedReason: SELECT operator: Could not vectorize expression (mode = PROJECTION): Column[strct].b
-                vectorized: false
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+                rowBatchContext:
+                    dataColumnCount: 5
+                    includeColumns: [3, 4]
+                    dataColumns: str:string, mp:map<string,string>, lst:array<string>, strct:struct<a:string,b:string>, val:string
+                    partitionColumnCount: 0
+                    scratchColumnTypeNames: [string]
         Reducer 2 
             Execution mode: vectorized, llap
             Reduce Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out b/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out
new file mode 100644
index 0000000..c67e8d1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/orc_struct_type_vectorization.q.out
@@ -0,0 +1,503 @@
+PREHOOK: query: DROP TABLE orc_struct_type_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_struct_type_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orc_struct_type
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_struct_type
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orc_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ','
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_struct_type_staging
+POSTHOOK: query: CREATE TABLE orc_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ','
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_struct_type_staging
+PREHOOK: query: CREATE TABLE orc_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_struct_type
+POSTHOOK: query: CREATE TABLE orc_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_struct_type
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE orc_struct_type_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_struct_type_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE orc_struct_type_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_struct_type_staging
+PREHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1024
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type_staging
+PREHOOK: Output: default@orc_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1024
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type_staging
+POSTHOOK: Output: default@orc_struct_type
+POSTHOOK: Lineage: orc_struct_type.id SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st1 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st2 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from orc_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orc_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1023
+PREHOOK: query: explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: orc_struct_type
+            Statistics: Num rows: 1023 Data size: 196416 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Select Operator
+              expressions: st1 (type: struct<f1:int,f2:string>), st1.f1 (type: int), st1.f2 (type: string), st2 (type: struct<f1:int,f3:string>), st2.f1 (type: int), st2.f3 (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumnNums: [1, 4, 5, 2, 6, 7]
+                  selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int, VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 1:int) -> 5:string, VectorUDFStructField(col 2:struct<f1:int,f3:string>, col 0:int) -> 6:int, VectorUDFStructField(col 2:struct<f1:int,f3:string>, col 1:int) -> 7:string
+              Statistics: Num rows: 1023 Data size: 196416 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 10
+                Limit Vectorization:
+                    className: VectorLimitOperator
+                    native: true
+                Statistics: Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
+                  Statistics: Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: explain vectorization expression select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: orc_struct_type
+            Pruned Column Paths: st1.f1
+            Statistics: Num rows: 1023 Data size: 196416 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: FilterLongColGreaterLongScalar(col 4:int, val 500)(children: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int)
+              predicate: (st1.f1 > 500) (type: boolean)
+              Statistics: Num rows: 341 Data size: 65472 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: st1.f1 (type: int)
+                outputColumnNames: _col0
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [4]
+                    selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int
+                Statistics: Num rows: 341 Data size: 65472 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  Group By Vectorization:
+                      aggregators: VectorUDAFSumLong(col 4:int) -> bigint
+                      className: VectorGroupByOperator
+                      groupByMode: HASH
+                      keyExpressions: col 4:int
+                      native: false
+                      vectorProcessingMode: HASH
+                      projectedOutputColumnNums: [0]
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 341 Data size: 65472 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkOperator
+                        native: false
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                    Statistics: Num rows: 341 Data size: 65472 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 170 Data size: 32640 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: bigint), _col0 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 170 Data size: 32640 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 10
+              Statistics: Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 10 Data size: 1920 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510
+PREHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1025
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type_staging
+PREHOOK: Output: default@orc_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1025
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type_staging
+POSTHOOK: Output: default@orc_struct_type
+POSTHOOK: Lineage: orc_struct_type.id SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st1 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st2 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from orc_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orc_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1024
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510
+PREHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1026
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type_staging
+PREHOOK: Output: default@orc_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1026
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type_staging
+POSTHOOK: Output: default@orc_struct_type
+POSTHOOK: Lineage: orc_struct_type.id SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st1 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st2 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from orc_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from orc_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1025
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+#### A masked pattern was here ####
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/test/results/clientpositive/parquet_struct_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_struct_type_vectorization.q.out b/ql/src/test/results/clientpositive/parquet_struct_type_vectorization.q.out
new file mode 100644
index 0000000..ed9bb09
--- /dev/null
+++ b/ql/src/test/results/clientpositive/parquet_struct_type_vectorization.q.out
@@ -0,0 +1,503 @@
+PREHOOK: query: DROP TABLE parquet_struct_type_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_struct_type_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE parquet_struct_type
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE parquet_struct_type
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE parquet_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ','
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_struct_type_staging
+POSTHOOK: query: CREATE TABLE parquet_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ','
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_struct_type_staging
+PREHOOK: query: CREATE TABLE parquet_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS PARQUET
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_struct_type
+POSTHOOK: query: CREATE TABLE parquet_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS PARQUET
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_struct_type
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE parquet_struct_type_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@parquet_struct_type_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE parquet_struct_type_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@parquet_struct_type_staging
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1024
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type_staging
+PREHOOK: Output: default@parquet_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1024
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type_staging
+POSTHOOK: Output: default@parquet_struct_type
+POSTHOOK: Lineage: parquet_struct_type.id SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st1 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st2 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1023
+PREHOOK: query: explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: parquet_struct_type
+            Statistics: Num rows: 1023 Data size: 3069 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Select Operator
+              expressions: st1 (type: struct<f1:int,f2:string>), st1.f1 (type: int), st1.f2 (type: string), st2 (type: struct<f1:int,f3:string>), st2.f1 (type: int), st2.f3 (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+              Select Vectorization:
+                  className: VectorSelectOperator
+                  native: true
+                  projectedOutputColumnNums: [1, 4, 5, 2, 6, 7]
+                  selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int, VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 1:int) -> 5:string, VectorUDFStructField(col 2:struct<f1:int,f3:string>, col 0:int) -> 6:int, VectorUDFStructField(col 2:struct<f1:int,f3:string>, col 1:int) -> 7:string
+              Statistics: Num rows: 1023 Data size: 3069 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 10
+                Limit Vectorization:
+                    className: VectorLimitOperator
+                    native: true
+                Statistics: Num rows: 10 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  File Sink Vectorization:
+                      className: VectorFileSinkOperator
+                      native: false
+                  Statistics: Num rows: 10 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: explain vectorization expression select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: parquet_struct_type
+            Pruned Column Paths: st1.f1
+            Statistics: Num rows: 1023 Data size: 3069 Basic stats: COMPLETE Column stats: NONE
+            TableScan Vectorization:
+                native: true
+            Filter Operator
+              Filter Vectorization:
+                  className: VectorFilterOperator
+                  native: true
+                  predicateExpression: FilterLongColGreaterLongScalar(col 4:int, val 500)(children: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int)
+              predicate: (st1.f1 > 500) (type: boolean)
+              Statistics: Num rows: 341 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: st1.f1 (type: int)
+                outputColumnNames: _col0
+                Select Vectorization:
+                    className: VectorSelectOperator
+                    native: true
+                    projectedOutputColumnNums: [4]
+                    selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int
+                Statistics: Num rows: 341 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(_col0)
+                  Group By Vectorization:
+                      aggregators: VectorUDAFSumLong(col 4:int) -> bigint
+                      className: VectorGroupByOperator
+                      groupByMode: HASH
+                      keyExpressions: col 4:int
+                      native: false
+                      vectorProcessingMode: HASH
+                      projectedOutputColumnNums: [0]
+                  keys: _col0 (type: int)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 341 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Reduce Sink Vectorization:
+                        className: VectorReduceSinkOperator
+                        native: false
+                        nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                        nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+                    Statistics: Num rows: 341 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
+                    TopN Hash Memory Usage: 0.1
+                    value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Map Vectorization:
+          enabled: true
+          enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+          inputFormatFeatureSupport: []
+          featureSupportInUse: []
+          inputFileFormats: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat
+          allNative: false
+          usesVectorUDFAdaptor: false
+          vectorized: true
+      Reduce Vectorization:
+          enabled: false
+          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
+          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 170 Data size: 510 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: bigint), _col0 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 170 Data size: 510 Basic stats: COMPLETE Column stats: NONE
+            Limit
+              Number of rows: 10
+              Statistics: Num rows: 10 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 10 Data size: 30 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1025
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type_staging
+PREHOOK: Output: default@parquet_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1025
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type_staging
+POSTHOOK: Output: default@parquet_struct_type
+POSTHOOK: Lineage: parquet_struct_type.id SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st1 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st2 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1024
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510
+PREHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1026
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type_staging
+PREHOOK: Output: default@parquet_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1026
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type_staging
+POSTHOOK: Output: default@parquet_struct_type
+POSTHOOK: Lineage: parquet_struct_type.id SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st1 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: parquet_struct_type.st2 SIMPLE [(parquet_struct_type_staging)parquet_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from parquet_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from parquet_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1025
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select st1.f1, st2.f1 from parquet_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+POSTHOOK: query: select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_struct_type
+#### A masked pattern was here ####
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510


[2/2] hive git commit: HIVE-18048: Vectorization: Support Struct type with vectorization (Colin Ma, reviewed by Ferdinand Xu)

Posted by xu...@apache.org.
HIVE-18048: Vectorization: Support Struct type with vectorization (Colin Ma, reviewed by Ferdinand Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ac721836
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ac721836
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ac721836

Branch: refs/heads/master
Commit: ac721836f182b6f4cbeb07c2dd1f56692276cbc0
Parents: 1bcc88f
Author: Ferdinand Xu <ch...@intel.com>
Authored: Tue Feb 6 10:46:08 2018 +0800
Committer: Ferdinand Xu <ch...@intel.com>
Committed: Tue Feb 6 10:46:08 2018 +0800

----------------------------------------------------------------------
 data/files/struct_type.txt                      | 1025 ++++++++++++++++++
 .../test/resources/testconfiguration.properties |    2 +
 .../ql/exec/vector/VectorizationContext.java    |   45 +-
 .../expressions/VectorUDFStructField.java       |   98 ++
 .../ql/udf/generic/GenericUDFStructField.java   |   53 +
 .../orc_struct_type_vectorization.q             |   65 ++
 .../parquet_struct_type_vectorization.q         |   65 ++
 .../llap/orc_struct_type_vectorization.q.out    |  535 +++++++++
 .../parquet_struct_type_vectorization.q.out     |  535 +++++++++
 .../llap/vector_complex_all.q.out               |   63 +-
 .../orc_struct_type_vectorization.q.out         |  503 +++++++++
 .../parquet_struct_type_vectorization.q.out     |  503 +++++++++
 12 files changed, 3484 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/data/files/struct_type.txt
----------------------------------------------------------------------
diff --git a/data/files/struct_type.txt b/data/files/struct_type.txt
new file mode 100644
index 0000000..bbe9e94
--- /dev/null
+++ b/data/files/struct_type.txt
@@ -0,0 +1,1025 @@
+1|1,str1|2001,str2001
+2|2,str2|2002,str2002
+3|3,str3|2003,str2003
+4|4,str4|2004,str2004
+5|5,str5|2005,str2005
+6|6,str6|2006,str2006
+7|7,str7|2007,str2007
+8|8,str8|2008,str2008
+9|9,str9|2009,str2009
+10|10,str10|2010,str2010
+11|11,str11|2011,str2011
+12|12,str12|2012,str2012
+13|13,str13|2013,str2013
+14|14,str14|2014,str2014
+15|15,str15|2015,str2015
+16|16,str16|2016,str2016
+17|17,str17|2017,str2017
+18|18,str18|2018,str2018
+19|19,str19|2019,str2019
+20|20,str20|2020,str2020
+21|21,str21|2021,str2021
+22|22,str22|2022,str2022
+23|23,str23|2023,str2023
+24|24,str24|2024,str2024
+25|25,str25|2025,str2025
+26|26,str26|2026,str2026
+27|27,str27|2027,str2027
+28|28,str28|2028,str2028
+29|29,str29|2029,str2029
+30|30,str30|2030,str2030
+31|31,str31|2031,str2031
+32|32,str32|2032,str2032
+33|33,str33|2033,str2033
+34|34,str34|2034,str2034
+35|35,str35|2035,str2035
+36|36,str36|2036,str2036
+37|37,str37|2037,str2037
+38|38,str38|2038,str2038
+39|39,str39|2039,str2039
+40|40,str40|2040,str2040
+41|41,str41|2041,str2041
+42|42,str42|2042,str2042
+43|43,str43|2043,str2043
+44|44,str44|2044,str2044
+45|45,str45|2045,str2045
+46|46,str46|2046,str2046
+47|47,str47|2047,str2047
+48|48,str48|2048,str2048
+49|49,str49|2049,str2049
+50|50,str50|2050,str2050
+51|51,str51|2051,str2051
+52|52,str52|2052,str2052
+53|53,str53|2053,str2053
+54|54,str54|2054,str2054
+55|55,str55|2055,str2055
+56|56,str56|2056,str2056
+57|57,str57|2057,str2057
+58|58,str58|2058,str2058
+59|59,str59|2059,str2059
+60|60,str60|2060,str2060
+61|61,str61|2061,str2061
+62|62,str62|2062,str2062
+63|63,str63|2063,str2063
+64|64,str64|2064,str2064
+65|65,str65|2065,str2065
+66|66,str66|2066,str2066
+67|67,str67|2067,str2067
+68|68,str68|2068,str2068
+69|69,str69|2069,str2069
+70|70,str70|2070,str2070
+71|71,str71|2071,str2071
+72|72,str72|2072,str2072
+73|73,str73|2073,str2073
+74|74,str74|2074,str2074
+75|75,str75|2075,str2075
+76|76,str76|2076,str2076
+77|77,str77|2077,str2077
+78|78,str78|2078,str2078
+79|79,str79|2079,str2079
+80|80,str80|2080,str2080
+81|81,str81|2081,str2081
+82|82,str82|2082,str2082
+83|83,str83|2083,str2083
+84|84,str84|2084,str2084
+85|85,str85|2085,str2085
+86|86,str86|2086,str2086
+87|87,str87|2087,str2087
+88|88,str88|2088,str2088
+89|89,str89|2089,str2089
+90|90,str90|2090,str2090
+91|91,str91|2091,str2091
+92|92,str92|2092,str2092
+93|93,str93|2093,str2093
+94|94,str94|2094,str2094
+95|95,str95|2095,str2095
+96|96,str96|2096,str2096
+97|97,str97|2097,str2097
+98|98,str98|2098,str2098
+99|99,str99|2099,str2099
+100|100,str100|2100,str2100
+101|101,str101|2101,str2101
+102|102,str102|2102,str2102
+103|103,str103|2103,str2103
+104|104,str104|2104,str2104
+105|105,str105|2105,str2105
+106|106,str106|2106,str2106
+107|107,str107|2107,str2107
+108|108,str108|2108,str2108
+109|109,str109|2109,str2109
+110|110,str110|2110,str2110
+111|111,str111|2111,str2111
+112|112,str112|2112,str2112
+113|113,str113|2113,str2113
+114|114,str114|2114,str2114
+115|115,str115|2115,str2115
+116|116,str116|2116,str2116
+117|117,str117|2117,str2117
+118|118,str118|2118,str2118
+119|119,str119|2119,str2119
+120|120,str120|2120,str2120
+121|121,str121|2121,str2121
+122|122,str122|2122,str2122
+123|123,str123|2123,str2123
+124|124,str124|2124,str2124
+125|125,str125|2125,str2125
+126|126,str126|2126,str2126
+127|127,str127|2127,str2127
+128|128,str128|2128,str2128
+129|129,str129|2129,str2129
+130|130,str130|2130,str2130
+131|131,str131|2131,str2131
+132|132,str132|2132,str2132
+133|133,str133|2133,str2133
+134|134,str134|2134,str2134
+135|135,str135|2135,str2135
+136|136,str136|2136,str2136
+137|137,str137|2137,str2137
+138|138,str138|2138,str2138
+139|139,str139|2139,str2139
+140|140,str140|2140,str2140
+141|141,str141|2141,str2141
+142|142,str142|2142,str2142
+143|143,str143|2143,str2143
+144|144,str144|2144,str2144
+145|145,str145|2145,str2145
+146|146,str146|2146,str2146
+147|147,str147|2147,str2147
+148|148,str148|2148,str2148
+149|149,str149|2149,str2149
+150|150,str150|2150,str2150
+151|151,str151|2151,str2151
+152|152,str152|2152,str2152
+153|153,str153|2153,str2153
+154|154,str154|2154,str2154
+155|155,str155|2155,str2155
+156|156,str156|2156,str2156
+157|157,str157|2157,str2157
+158|158,str158|2158,str2158
+159|159,str159|2159,str2159
+160|160,str160|2160,str2160
+161|161,str161|2161,str2161
+162|162,str162|2162,str2162
+163|163,str163|2163,str2163
+164|164,str164|2164,str2164
+165|165,str165|2165,str2165
+166|166,str166|2166,str2166
+167|167,str167|2167,str2167
+168|168,str168|2168,str2168
+169|169,str169|2169,str2169
+170|170,str170|2170,str2170
+171|171,str171|2171,str2171
+172|172,str172|2172,str2172
+173|173,str173|2173,str2173
+174|174,str174|2174,str2174
+175|175,str175|2175,str2175
+176|176,str176|2176,str2176
+177|177,str177|2177,str2177
+178|178,str178|2178,str2178
+179|179,str179|2179,str2179
+180|180,str180|2180,str2180
+181|181,str181|2181,str2181
+182|182,str182|2182,str2182
+183|183,str183|2183,str2183
+184|184,str184|2184,str2184
+185|185,str185|2185,str2185
+186|186,str186|2186,str2186
+187|187,str187|2187,str2187
+188|188,str188|2188,str2188
+189|189,str189|2189,str2189
+190|190,str190|2190,str2190
+191|191,str191|2191,str2191
+192|192,str192|2192,str2192
+193|193,str193|2193,str2193
+194|194,str194|2194,str2194
+195|195,str195|2195,str2195
+196|196,str196|2196,str2196
+197|197,str197|2197,str2197
+198|198,str198|2198,str2198
+199|199,str199|2199,str2199
+200|200,str200|2200,str2200
+201|201,str201|2201,str2201
+202|202,str202|2202,str2202
+203|203,str203|2203,str2203
+204|204,str204|2204,str2204
+205|205,str205|2205,str2205
+206|206,str206|2206,str2206
+207|207,str207|2207,str2207
+208|208,str208|2208,str2208
+209|209,str209|2209,str2209
+210|210,str210|2210,str2210
+211|211,str211|2211,str2211
+212|212,str212|2212,str2212
+213|213,str213|2213,str2213
+214|214,str214|2214,str2214
+215|215,str215|2215,str2215
+216|216,str216|2216,str2216
+217|217,str217|2217,str2217
+218|218,str218|2218,str2218
+219|219,str219|2219,str2219
+220|220,str220|2220,str2220
+221|221,str221|2221,str2221
+222|222,str222|2222,str2222
+223|223,str223|2223,str2223
+224|224,str224|2224,str2224
+225|225,str225|2225,str2225
+226|226,str226|2226,str2226
+227|227,str227|2227,str2227
+228|228,str228|2228,str2228
+229|229,str229|2229,str2229
+230|230,str230|2230,str2230
+231|231,str231|2231,str2231
+232|232,str232|2232,str2232
+233|233,str233|2233,str2233
+234|234,str234|2234,str2234
+235|235,str235|2235,str2235
+236|236,str236|2236,str2236
+237|237,str237|2237,str2237
+238|238,str238|2238,str2238
+239|239,str239|2239,str2239
+240|240,str240|2240,str2240
+241|241,str241|2241,str2241
+242|242,str242|2242,str2242
+243|243,str243|2243,str2243
+244|244,str244|2244,str2244
+245|245,str245|2245,str2245
+246|246,str246|2246,str2246
+247|247,str247|2247,str2247
+248|248,str248|2248,str2248
+249|249,str249|2249,str2249
+250|250,str250|2250,str2250
+251|251,str251|2251,str2251
+252|252,str252|2252,str2252
+253|253,str253|2253,str2253
+254|254,str254|2254,str2254
+255|255,str255|2255,str2255
+256|256,str256|2256,str2256
+257|257,str257|2257,str2257
+258|258,str258|2258,str2258
+259|259,str259|2259,str2259
+260|260,str260|2260,str2260
+261|261,str261|2261,str2261
+262|262,str262|2262,str2262
+263|263,str263|2263,str2263
+264|264,str264|2264,str2264
+265|265,str265|2265,str2265
+266|266,str266|2266,str2266
+267|267,str267|2267,str2267
+268|268,str268|2268,str2268
+269|269,str269|2269,str2269
+270|270,str270|2270,str2270
+271|271,str271|2271,str2271
+272|272,str272|2272,str2272
+273|273,str273|2273,str2273
+274|274,str274|2274,str2274
+275|275,str275|2275,str2275
+276|276,str276|2276,str2276
+277|277,str277|2277,str2277
+278|278,str278|2278,str2278
+279|279,str279|2279,str2279
+280|280,str280|2280,str2280
+281|281,str281|2281,str2281
+282|282,str282|2282,str2282
+283|283,str283|2283,str2283
+284|284,str284|2284,str2284
+285|285,str285|2285,str2285
+286|286,str286|2286,str2286
+287|287,str287|2287,str2287
+288|288,str288|2288,str2288
+289|289,str289|2289,str2289
+290|290,str290|2290,str2290
+291|291,str291|2291,str2291
+292|292,str292|2292,str2292
+293|293,str293|2293,str2293
+294|294,str294|2294,str2294
+295|295,str295|2295,str2295
+296|296,str296|2296,str2296
+297|297,str297|2297,str2297
+298|298,str298|2298,str2298
+299|299,str299|2299,str2299
+300|300,str300|2300,str2300
+301|301,str301|2301,str2301
+302|302,str302|2302,str2302
+303|303,str303|2303,str2303
+304|304,str304|2304,str2304
+305|305,str305|2305,str2305
+306|306,str306|2306,str2306
+307|307,str307|2307,str2307
+308|308,str308|2308,str2308
+309|309,str309|2309,str2309
+310|310,str310|2310,str2310
+311|311,str311|2311,str2311
+312|312,str312|2312,str2312
+313|313,str313|2313,str2313
+314|314,str314|2314,str2314
+315|315,str315|2315,str2315
+316|316,str316|2316,str2316
+317|317,str317|2317,str2317
+318|318,str318|2318,str2318
+319|319,str319|2319,str2319
+320|320,str320|2320,str2320
+321|321,str321|2321,str2321
+322|322,str322|2322,str2322
+323|323,str323|2323,str2323
+324|324,str324|2324,str2324
+325|325,str325|2325,str2325
+326|326,str326|2326,str2326
+327|327,str327|2327,str2327
+328|328,str328|2328,str2328
+329|329,str329|2329,str2329
+330|330,str330|2330,str2330
+331|331,str331|2331,str2331
+332|332,str332|2332,str2332
+333|333,str333|2333,str2333
+334|334,str334|2334,str2334
+335|335,str335|2335,str2335
+336|336,str336|2336,str2336
+337|337,str337|2337,str2337
+338|338,str338|2338,str2338
+339|339,str339|2339,str2339
+340|340,str340|2340,str2340
+341|341,str341|2341,str2341
+342|342,str342|2342,str2342
+343|343,str343|2343,str2343
+344|344,str344|2344,str2344
+345|345,str345|2345,str2345
+346|346,str346|2346,str2346
+347|347,str347|2347,str2347
+348|348,str348|2348,str2348
+349|349,str349|2349,str2349
+350|350,str350|2350,str2350
+351|351,str351|2351,str2351
+352|352,str352|2352,str2352
+353|353,str353|2353,str2353
+354|354,str354|2354,str2354
+355|355,str355|2355,str2355
+356|356,str356|2356,str2356
+357|357,str357|2357,str2357
+358|358,str358|2358,str2358
+359|359,str359|2359,str2359
+360|360,str360|2360,str2360
+361|361,str361|2361,str2361
+362|362,str362|2362,str2362
+363|363,str363|2363,str2363
+364|364,str364|2364,str2364
+365|365,str365|2365,str2365
+366|366,str366|2366,str2366
+367|367,str367|2367,str2367
+368|368,str368|2368,str2368
+369|369,str369|2369,str2369
+370|370,str370|2370,str2370
+371|371,str371|2371,str2371
+372|372,str372|2372,str2372
+373|373,str373|2373,str2373
+374|374,str374|2374,str2374
+375|375,str375|2375,str2375
+376|376,str376|2376,str2376
+377|377,str377|2377,str2377
+378|378,str378|2378,str2378
+379|379,str379|2379,str2379
+380|380,str380|2380,str2380
+381|381,str381|2381,str2381
+382|382,str382|2382,str2382
+383|383,str383|2383,str2383
+384|384,str384|2384,str2384
+385|385,str385|2385,str2385
+386|386,str386|2386,str2386
+387|387,str387|2387,str2387
+388|388,str388|2388,str2388
+389|389,str389|2389,str2389
+390|390,str390|2390,str2390
+391|391,str391|2391,str2391
+392|392,str392|2392,str2392
+393|393,str393|2393,str2393
+394|394,str394|2394,str2394
+395|395,str395|2395,str2395
+396|396,str396|2396,str2396
+397|397,str397|2397,str2397
+398|398,str398|2398,str2398
+399|399,str399|2399,str2399
+400|400,str400|2400,str2400
+401|401,str401|2401,str2401
+402|402,str402|2402,str2402
+403|403,str403|2403,str2403
+404|404,str404|2404,str2404
+405|405,str405|2405,str2405
+406|406,str406|2406,str2406
+407|407,str407|2407,str2407
+408|408,str408|2408,str2408
+409|409,str409|2409,str2409
+410|410,str410|2410,str2410
+411|411,str411|2411,str2411
+412|412,str412|2412,str2412
+413|413,str413|2413,str2413
+414|414,str414|2414,str2414
+415|415,str415|2415,str2415
+416|416,str416|2416,str2416
+417|417,str417|2417,str2417
+418|418,str418|2418,str2418
+419|419,str419|2419,str2419
+420|420,str420|2420,str2420
+421|421,str421|2421,str2421
+422|422,str422|2422,str2422
+423|423,str423|2423,str2423
+424|424,str424|2424,str2424
+425|425,str425|2425,str2425
+426|426,str426|2426,str2426
+427|427,str427|2427,str2427
+428|428,str428|2428,str2428
+429|429,str429|2429,str2429
+430|430,str430|2430,str2430
+431|431,str431|2431,str2431
+432|432,str432|2432,str2432
+433|433,str433|2433,str2433
+434|434,str434|2434,str2434
+435|435,str435|2435,str2435
+436|436,str436|2436,str2436
+437|437,str437|2437,str2437
+438|438,str438|2438,str2438
+439|439,str439|2439,str2439
+440|440,str440|2440,str2440
+441|441,str441|2441,str2441
+442|442,str442|2442,str2442
+443|443,str443|2443,str2443
+444|444,str444|2444,str2444
+445|445,str445|2445,str2445
+446|446,str446|2446,str2446
+447|447,str447|2447,str2447
+448|448,str448|2448,str2448
+449|449,str449|2449,str2449
+450|450,str450|2450,str2450
+451|451,str451|2451,str2451
+452|452,str452|2452,str2452
+453|453,str453|2453,str2453
+454|454,str454|2454,str2454
+455|455,str455|2455,str2455
+456|456,str456|2456,str2456
+457|457,str457|2457,str2457
+458|458,str458|2458,str2458
+459|459,str459|2459,str2459
+460|460,str460|2460,str2460
+461|461,str461|2461,str2461
+462|462,str462|2462,str2462
+463|463,str463|2463,str2463
+464|464,str464|2464,str2464
+465|465,str465|2465,str2465
+466|466,str466|2466,str2466
+467|467,str467|2467,str2467
+468|468,str468|2468,str2468
+469|469,str469|2469,str2469
+470|470,str470|2470,str2470
+471|471,str471|2471,str2471
+472|472,str472|2472,str2472
+473|473,str473|2473,str2473
+474|474,str474|2474,str2474
+475|475,str475|2475,str2475
+476|476,str476|2476,str2476
+477|477,str477|2477,str2477
+478|478,str478|2478,str2478
+479|479,str479|2479,str2479
+480|480,str480|2480,str2480
+481|481,str481|2481,str2481
+482|482,str482|2482,str2482
+483|483,str483|2483,str2483
+484|484,str484|2484,str2484
+485|485,str485|2485,str2485
+486|486,str486|2486,str2486
+487|487,str487|2487,str2487
+488|488,str488|2488,str2488
+489|489,str489|2489,str2489
+490|490,str490|2490,str2490
+491|491,str491|2491,str2491
+492|492,str492|2492,str2492
+493|493,str493|2493,str2493
+494|494,str494|2494,str2494
+495|495,str495|2495,str2495
+496|496,str496|2496,str2496
+497|497,str497|2497,str2497
+498|498,str498|2498,str2498
+499|499,str499|2499,str2499
+500|500,str500|2500,str2500
+501|501,str501|2501,str2501
+502|502,str502|2502,str2502
+503|503,str503|2503,str2503
+504|504,str504|2504,str2504
+505|505,str505|2505,str2505
+506|506,str506|2506,str2506
+507|507,str507|2507,str2507
+508|508,str508|2508,str2508
+509|509,str509|2509,str2509
+510|510,str510|2510,str2510
+511|511,str511|2511,str2511
+512|512,str512|2512,str2512
+513|513,str513|2513,str2513
+514|514,str514|2514,str2514
+515|515,str515|2515,str2515
+516|516,str516|2516,str2516
+517|517,str517|2517,str2517
+518|518,str518|2518,str2518
+519|519,str519|2519,str2519
+520|520,str520|2520,str2520
+521|521,str521|2521,str2521
+522|522,str522|2522,str2522
+523|523,str523|2523,str2523
+524|524,str524|2524,str2524
+525|525,str525|2525,str2525
+526|526,str526|2526,str2526
+527|527,str527|2527,str2527
+528|528,str528|2528,str2528
+529|529,str529|2529,str2529
+530|530,str530|2530,str2530
+531|531,str531|2531,str2531
+532|532,str532|2532,str2532
+533|533,str533|2533,str2533
+534|534,str534|2534,str2534
+535|535,str535|2535,str2535
+536|536,str536|2536,str2536
+537|537,str537|2537,str2537
+538|538,str538|2538,str2538
+539|539,str539|2539,str2539
+540|540,str540|2540,str2540
+541|541,str541|2541,str2541
+542|542,str542|2542,str2542
+543|543,str543|2543,str2543
+544|544,str544|2544,str2544
+545|545,str545|2545,str2545
+546|546,str546|2546,str2546
+547|547,str547|2547,str2547
+548|548,str548|2548,str2548
+549|549,str549|2549,str2549
+550|550,str550|2550,str2550
+551|551,str551|2551,str2551
+552|552,str552|2552,str2552
+553|553,str553|2553,str2553
+554|554,str554|2554,str2554
+555|555,str555|2555,str2555
+556|556,str556|2556,str2556
+557|557,str557|2557,str2557
+558|558,str558|2558,str2558
+559|559,str559|2559,str2559
+560|560,str560|2560,str2560
+561|561,str561|2561,str2561
+562|562,str562|2562,str2562
+563|563,str563|2563,str2563
+564|564,str564|2564,str2564
+565|565,str565|2565,str2565
+566|566,str566|2566,str2566
+567|567,str567|2567,str2567
+568|568,str568|2568,str2568
+569|569,str569|2569,str2569
+570|570,str570|2570,str2570
+571|571,str571|2571,str2571
+572|572,str572|2572,str2572
+573|573,str573|2573,str2573
+574|574,str574|2574,str2574
+575|575,str575|2575,str2575
+576|576,str576|2576,str2576
+577|577,str577|2577,str2577
+578|578,str578|2578,str2578
+579|579,str579|2579,str2579
+580|580,str580|2580,str2580
+581|581,str581|2581,str2581
+582|582,str582|2582,str2582
+583|583,str583|2583,str2583
+584|584,str584|2584,str2584
+585|585,str585|2585,str2585
+586|586,str586|2586,str2586
+587|587,str587|2587,str2587
+588|588,str588|2588,str2588
+589|589,str589|2589,str2589
+590|590,str590|2590,str2590
+591|591,str591|2591,str2591
+592|592,str592|2592,str2592
+593|593,str593|2593,str2593
+594|594,str594|2594,str2594
+595|595,str595|2595,str2595
+596|596,str596|2596,str2596
+597|597,str597|2597,str2597
+598|598,str598|2598,str2598
+599|599,str599|2599,str2599
+600|600,str600|2600,str2600
+601|601,str601|2601,str2601
+602|602,str602|2602,str2602
+603|603,str603|2603,str2603
+604|604,str604|2604,str2604
+605|605,str605|2605,str2605
+606|606,str606|2606,str2606
+607|607,str607|2607,str2607
+608|608,str608|2608,str2608
+609|609,str609|2609,str2609
+610|610,str610|2610,str2610
+611|611,str611|2611,str2611
+612|612,str612|2612,str2612
+613|613,str613|2613,str2613
+614|614,str614|2614,str2614
+615|615,str615|2615,str2615
+616|616,str616|2616,str2616
+617|617,str617|2617,str2617
+618|618,str618|2618,str2618
+619|619,str619|2619,str2619
+620|620,str620|2620,str2620
+621|621,str621|2621,str2621
+622|622,str622|2622,str2622
+623|623,str623|2623,str2623
+624|624,str624|2624,str2624
+625|625,str625|2625,str2625
+626|626,str626|2626,str2626
+627|627,str627|2627,str2627
+628|628,str628|2628,str2628
+629|629,str629|2629,str2629
+630|630,str630|2630,str2630
+631|631,str631|2631,str2631
+632|632,str632|2632,str2632
+633|633,str633|2633,str2633
+634|634,str634|2634,str2634
+635|635,str635|2635,str2635
+636|636,str636|2636,str2636
+637|637,str637|2637,str2637
+638|638,str638|2638,str2638
+639|639,str639|2639,str2639
+640|640,str640|2640,str2640
+641|641,str641|2641,str2641
+642|642,str642|2642,str2642
+643|643,str643|2643,str2643
+644|644,str644|2644,str2644
+645|645,str645|2645,str2645
+646|646,str646|2646,str2646
+647|647,str647|2647,str2647
+648|648,str648|2648,str2648
+649|649,str649|2649,str2649
+650|650,str650|2650,str2650
+651|651,str651|2651,str2651
+652|652,str652|2652,str2652
+653|653,str653|2653,str2653
+654|654,str654|2654,str2654
+655|655,str655|2655,str2655
+656|656,str656|2656,str2656
+657|657,str657|2657,str2657
+658|658,str658|2658,str2658
+659|659,str659|2659,str2659
+660|660,str660|2660,str2660
+661|661,str661|2661,str2661
+662|662,str662|2662,str2662
+663|663,str663|2663,str2663
+664|664,str664|2664,str2664
+665|665,str665|2665,str2665
+666|666,str666|2666,str2666
+667|667,str667|2667,str2667
+668|668,str668|2668,str2668
+669|669,str669|2669,str2669
+670|670,str670|2670,str2670
+671|671,str671|2671,str2671
+672|672,str672|2672,str2672
+673|673,str673|2673,str2673
+674|674,str674|2674,str2674
+675|675,str675|2675,str2675
+676|676,str676|2676,str2676
+677|677,str677|2677,str2677
+678|678,str678|2678,str2678
+679|679,str679|2679,str2679
+680|680,str680|2680,str2680
+681|681,str681|2681,str2681
+682|682,str682|2682,str2682
+683|683,str683|2683,str2683
+684|684,str684|2684,str2684
+685|685,str685|2685,str2685
+686|686,str686|2686,str2686
+687|687,str687|2687,str2687
+688|688,str688|2688,str2688
+689|689,str689|2689,str2689
+690|690,str690|2690,str2690
+691|691,str691|2691,str2691
+692|692,str692|2692,str2692
+693|693,str693|2693,str2693
+694|694,str694|2694,str2694
+695|695,str695|2695,str2695
+696|696,str696|2696,str2696
+697|697,str697|2697,str2697
+698|698,str698|2698,str2698
+699|699,str699|2699,str2699
+700|700,str700|2700,str2700
+701|701,str701|2701,str2701
+702|702,str702|2702,str2702
+703|703,str703|2703,str2703
+704|704,str704|2704,str2704
+705|705,str705|2705,str2705
+706|706,str706|2706,str2706
+707|707,str707|2707,str2707
+708|708,str708|2708,str2708
+709|709,str709|2709,str2709
+710|710,str710|2710,str2710
+711|711,str711|2711,str2711
+712|712,str712|2712,str2712
+713|713,str713|2713,str2713
+714|714,str714|2714,str2714
+715|715,str715|2715,str2715
+716|716,str716|2716,str2716
+717|717,str717|2717,str2717
+718|718,str718|2718,str2718
+719|719,str719|2719,str2719
+720|720,str720|2720,str2720
+721|721,str721|2721,str2721
+722|722,str722|2722,str2722
+723|723,str723|2723,str2723
+724|724,str724|2724,str2724
+725|725,str725|2725,str2725
+726|726,str726|2726,str2726
+727|727,str727|2727,str2727
+728|728,str728|2728,str2728
+729|729,str729|2729,str2729
+730|730,str730|2730,str2730
+731|731,str731|2731,str2731
+732|732,str732|2732,str2732
+733|733,str733|2733,str2733
+734|734,str734|2734,str2734
+735|735,str735|2735,str2735
+736|736,str736|2736,str2736
+737|737,str737|2737,str2737
+738|738,str738|2738,str2738
+739|739,str739|2739,str2739
+740|740,str740|2740,str2740
+741|741,str741|2741,str2741
+742|742,str742|2742,str2742
+743|743,str743|2743,str2743
+744|744,str744|2744,str2744
+745|745,str745|2745,str2745
+746|746,str746|2746,str2746
+747|747,str747|2747,str2747
+748|748,str748|2748,str2748
+749|749,str749|2749,str2749
+750|750,str750|2750,str2750
+751|751,str751|2751,str2751
+752|752,str752|2752,str2752
+753|753,str753|2753,str2753
+754|754,str754|2754,str2754
+755|755,str755|2755,str2755
+756|756,str756|2756,str2756
+757|757,str757|2757,str2757
+758|758,str758|2758,str2758
+759|759,str759|2759,str2759
+760|760,str760|2760,str2760
+761|761,str761|2761,str2761
+762|762,str762|2762,str2762
+763|763,str763|2763,str2763
+764|764,str764|2764,str2764
+765|765,str765|2765,str2765
+766|766,str766|2766,str2766
+767|767,str767|2767,str2767
+768|768,str768|2768,str2768
+769|769,str769|2769,str2769
+770|770,str770|2770,str2770
+771|771,str771|2771,str2771
+772|772,str772|2772,str2772
+773|773,str773|2773,str2773
+774|774,str774|2774,str2774
+775|775,str775|2775,str2775
+776|776,str776|2776,str2776
+777|777,str777|2777,str2777
+778|778,str778|2778,str2778
+779|779,str779|2779,str2779
+780|780,str780|2780,str2780
+781|781,str781|2781,str2781
+782|782,str782|2782,str2782
+783|783,str783|2783,str2783
+784|784,str784|2784,str2784
+785|785,str785|2785,str2785
+786|786,str786|2786,str2786
+787|787,str787|2787,str2787
+788|788,str788|2788,str2788
+789|789,str789|2789,str2789
+790|790,str790|2790,str2790
+791|791,str791|2791,str2791
+792|792,str792|2792,str2792
+793|793,str793|2793,str2793
+794|794,str794|2794,str2794
+795|795,str795|2795,str2795
+796|796,str796|2796,str2796
+797|797,str797|2797,str2797
+798|798,str798|2798,str2798
+799|799,str799|2799,str2799
+800|800,str800|2800,str2800
+801|801,str801|2801,str2801
+802|802,str802|2802,str2802
+803|803,str803|2803,str2803
+804|804,str804|2804,str2804
+805|805,str805|2805,str2805
+806|806,str806|2806,str2806
+807|807,str807|2807,str2807
+808|808,str808|2808,str2808
+809|809,str809|2809,str2809
+810|810,str810|2810,str2810
+811|811,str811|2811,str2811
+812|812,str812|2812,str2812
+813|813,str813|2813,str2813
+814|814,str814|2814,str2814
+815|815,str815|2815,str2815
+816|816,str816|2816,str2816
+817|817,str817|2817,str2817
+818|818,str818|2818,str2818
+819|819,str819|2819,str2819
+820|820,str820|2820,str2820
+821|821,str821|2821,str2821
+822|822,str822|2822,str2822
+823|823,str823|2823,str2823
+824|824,str824|2824,str2824
+825|825,str825|2825,str2825
+826|826,str826|2826,str2826
+827|827,str827|2827,str2827
+828|828,str828|2828,str2828
+829|829,str829|2829,str2829
+830|830,str830|2830,str2830
+831|831,str831|2831,str2831
+832|832,str832|2832,str2832
+833|833,str833|2833,str2833
+834|834,str834|2834,str2834
+835|835,str835|2835,str2835
+836|836,str836|2836,str2836
+837|837,str837|2837,str2837
+838|838,str838|2838,str2838
+839|839,str839|2839,str2839
+840|840,str840|2840,str2840
+841|841,str841|2841,str2841
+842|842,str842|2842,str2842
+843|843,str843|2843,str2843
+844|844,str844|2844,str2844
+845|845,str845|2845,str2845
+846|846,str846|2846,str2846
+847|847,str847|2847,str2847
+848|848,str848|2848,str2848
+849|849,str849|2849,str2849
+850|850,str850|2850,str2850
+851|851,str851|2851,str2851
+852|852,str852|2852,str2852
+853|853,str853|2853,str2853
+854|854,str854|2854,str2854
+855|855,str855|2855,str2855
+856|856,str856|2856,str2856
+857|857,str857|2857,str2857
+858|858,str858|2858,str2858
+859|859,str859|2859,str2859
+860|860,str860|2860,str2860
+861|861,str861|2861,str2861
+862|862,str862|2862,str2862
+863|863,str863|2863,str2863
+864|864,str864|2864,str2864
+865|865,str865|2865,str2865
+866|866,str866|2866,str2866
+867|867,str867|2867,str2867
+868|868,str868|2868,str2868
+869|869,str869|2869,str2869
+870|870,str870|2870,str2870
+871|871,str871|2871,str2871
+872|872,str872|2872,str2872
+873|873,str873|2873,str2873
+874|874,str874|2874,str2874
+875|875,str875|2875,str2875
+876|876,str876|2876,str2876
+877|877,str877|2877,str2877
+878|878,str878|2878,str2878
+879|879,str879|2879,str2879
+880|880,str880|2880,str2880
+881|881,str881|2881,str2881
+882|882,str882|2882,str2882
+883|883,str883|2883,str2883
+884|884,str884|2884,str2884
+885|885,str885|2885,str2885
+886|886,str886|2886,str2886
+887|887,str887|2887,str2887
+888|888,str888|2888,str2888
+889|889,str889|2889,str2889
+890|890,str890|2890,str2890
+891|891,str891|2891,str2891
+892|892,str892|2892,str2892
+893|893,str893|2893,str2893
+894|894,str894|2894,str2894
+895|895,str895|2895,str2895
+896|896,str896|2896,str2896
+897|897,str897|2897,str2897
+898|898,str898|2898,str2898
+899|899,str899|2899,str2899
+900|900,str900|2900,str2900
+901|901,str901|2901,str2901
+902|902,str902|2902,str2902
+903|903,str903|2903,str2903
+904|904,str904|2904,str2904
+905|905,str905|2905,str2905
+906|906,str906|2906,str2906
+907|907,str907|2907,str2907
+908|908,str908|2908,str2908
+909|909,str909|2909,str2909
+910|910,str910|2910,str2910
+911|911,str911|2911,str2911
+912|912,str912|2912,str2912
+913|913,str913|2913,str2913
+914|914,str914|2914,str2914
+915|915,str915|2915,str2915
+916|916,str916|2916,str2916
+917|917,str917|2917,str2917
+918|918,str918|2918,str2918
+919|919,str919|2919,str2919
+920|920,str920|2920,str2920
+921|921,str921|2921,str2921
+922|922,str922|2922,str2922
+923|923,str923|2923,str2923
+924|924,str924|2924,str2924
+925|925,str925|2925,str2925
+926|926,str926|2926,str2926
+927|927,str927|2927,str2927
+928|928,str928|2928,str2928
+929|929,str929|2929,str2929
+930|930,str930|2930,str2930
+931|931,str931|2931,str2931
+932|932,str932|2932,str2932
+933|933,str933|2933,str2933
+934|934,str934|2934,str2934
+935|935,str935|2935,str2935
+936|936,str936|2936,str2936
+937|937,str937|2937,str2937
+938|938,str938|2938,str2938
+939|939,str939|2939,str2939
+940|940,str940|2940,str2940
+941|941,str941|2941,str2941
+942|942,str942|2942,str2942
+943|943,str943|2943,str2943
+944|944,str944|2944,str2944
+945|945,str945|2945,str2945
+946|946,str946|2946,str2946
+947|947,str947|2947,str2947
+948|948,str948|2948,str2948
+949|949,str949|2949,str2949
+950|950,str950|2950,str2950
+951|951,str951|2951,str2951
+952|952,str952|2952,str2952
+953|953,str953|2953,str2953
+954|954,str954|2954,str2954
+955|955,str955|2955,str2955
+956|956,str956|2956,str2956
+957|957,str957|2957,str2957
+958|958,str958|2958,str2958
+959|959,str959|2959,str2959
+960|960,str960|2960,str2960
+961|961,str961|2961,str2961
+962|962,str962|2962,str2962
+963|963,str963|2963,str2963
+964|964,str964|2964,str2964
+965|965,str965|2965,str2965
+966|966,str966|2966,str2966
+967|967,str967|2967,str2967
+968|968,str968|2968,str2968
+969|969,str969|2969,str2969
+970|970,str970|2970,str2970
+971|971,str971|2971,str2971
+972|972,str972|2972,str2972
+973|973,str973|2973,str2973
+974|974,str974|2974,str2974
+975|975,str975|2975,str2975
+976|976,str976|2976,str2976
+977|977,str977|2977,str2977
+978|978,str978|2978,str2978
+979|979,str979|2979,str2979
+980|980,str980|2980,str2980
+981|981,str981|2981,str2981
+982|982,str982|2982,str2982
+983|983,str983|2983,str2983
+984|984,str984|2984,str2984
+985|985,str985|2985,str2985
+986|986,str986|2986,str2986
+987|987,str987|2987,str2987
+988|988,str988|2988,str2988
+989|989,str989|2989,str2989
+990|990,str990|2990,str2990
+991|991,str991|2991,str2991
+992|992,str992|2992,str2992
+993|993,str993|2993,str2993
+994|994,str994|2994,str2994
+995|995,str995|2995,str2995
+996|996,str996|2996,str2996
+997|997,str997|2997,str2997
+998|998,str998|2998,str2998
+999|999,str999|2999,str2999
+1000|1000,str1000|3000,str3000
+1001|1001,str1001|3001,str3001
+1002|1002,str1002|3002,str3002
+1003|1003,str1003|3003,str3003
+1004|1004,str1004|3004,str3004
+1005|1005,str1005|3005,str3005
+1006|1006,str1006|3006,str3006
+1007|1007,str1007|3007,str3007
+1008|1008,str1008|3008,str3008
+1009|1009,str1009|3009,str3009
+1010|1010,str1010|3010,str3010
+1011|1011,str1011|3011,str3011
+1012|1012,str1012|3012,str3012
+1013|1013,str1013|3013,str3013
+1014|1014,str1014|3014,str3014
+1015|1015,str1015|3015,str3015
+1016|1016,str1016|3016,str3016
+1017|1017,str1017|3017,str3017
+1018|1018,str1018|3018,str3018
+1019|1019,str1019|3019,str3019
+1020|1020,str1020|3020,str3020
+1021|1021,str1021|3021,str3021
+1022|1022,str1022|3022,str3022
+1023|1023,str1023|3023,str3023
+1024|1024,str1024|3024,str3024
+1025|1025,str1025|3025,str3025

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index fed9394..32b7551 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -82,6 +82,8 @@ minillap.shared.query.files=insert_into1.q,\
   parquet_types_vectorization.q,\
   parquet_complex_types_vectorization.q,\
   parquet_map_type_vectorization.q,\
+  parquet_struct_type_vectorization.q,\
+  orc_struct_type_vectorization.q,\
   union_type_chk.q,\
   cte_2.q,\
   cte_4.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index d46eb8d..8264e8a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDynamicValueDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
 import org.apache.hadoop.hive.ql.udf.*;
 import org.apache.hadoop.hive.ql.udf.generic.*;
 import org.apache.hadoop.hive.serde2.ByteStream.Output;
@@ -812,6 +813,10 @@ public class VectorizationContext {
           mode);
     } else if (exprDesc instanceof ExprNodeDynamicValueDesc) {
       ve = getDynamicValueVectorExpression((ExprNodeDynamicValueDesc) exprDesc, mode);
+    } else if (exprDesc instanceof ExprNodeFieldDesc) {
+      // Get the GenericUDFStructField to process the field of Struct type
+      ve = getGenericUDFStructField((ExprNodeFieldDesc)exprDesc,
+          mode, exprDesc.getTypeInfo());
     }
     if (ve == null) {
       throw new HiveException(
@@ -824,6 +829,41 @@ public class VectorizationContext {
     return ve;
   }
 
+  private VectorExpression getGenericUDFStructField(ExprNodeFieldDesc exprNodeFieldDesc,
+      VectorExpressionDescriptor.Mode mode, TypeInfo returnType) throws HiveException {
+    // set the arguments for GenericUDFStructField
+    List<ExprNodeDesc> children = new ArrayList<>(2);
+    children.add(exprNodeFieldDesc.getDesc());
+    children.add(new ExprNodeConstantDesc(getStructFieldIndex(exprNodeFieldDesc)));
+
+    return getVectorExpressionForUdf(null, GenericUDFStructField.class, children, mode, returnType);
+  }
+
+  /**
+   * The field of a Struct is stored in StructColumnVector.fields[index].
+   * Iterate over StructTypeInfo.getAllStructFieldNames(), comparing each name against the
+   * requested field name, to determine that index.
+   */
+  private int getStructFieldIndex(ExprNodeFieldDesc exprNodeFieldDesc) throws HiveException {
+    ExprNodeDesc structNodeDesc = exprNodeFieldDesc.getDesc();
+    String fieldName = exprNodeFieldDesc.getFieldName();
+    StructTypeInfo structTypeInfo = (StructTypeInfo) structNodeDesc.getTypeInfo();
+    int index = 0;
+    boolean isFieldExist = false;
+    for (String fn : structTypeInfo.getAllStructFieldNames()) {
+      if (fieldName.equals(fn)) {
+        isFieldExist = true;
+        break;
+      }
+      index++;
+    }
+    if (isFieldExist) {
+      return index;
+    } else {
+      throw new HiveException("Could not vectorize expression:" + exprNodeFieldDesc.toString()
+          + ", the field " + fieldName + " doesn't exist.");
+    }
+  }
+
   /**
    * Given a udf and its children, return the common type to which the children's type should be
    * cast.
@@ -1654,7 +1694,8 @@ public class VectorizationContext {
         throw new HiveException("No match for type string " + childTypeString + " from undecorated type name method");
       }
       builder.setArgumentType(i, undecoratedTypeName);
-      if ((child instanceof ExprNodeGenericFuncDesc) || (child instanceof ExprNodeColumnDesc)) {
+      if ((child instanceof ExprNodeGenericFuncDesc) || (child instanceof ExprNodeColumnDesc)
+          || (child instanceof ExprNodeFieldDesc)) {
         builder.setInputExpressionType(i, InputExpressionType.COLUMN);
       } else if (child instanceof ExprNodeConstantDesc) {
         builder.setInputExpressionType(i, InputExpressionType.SCALAR);
@@ -1732,7 +1773,7 @@ public class VectorizationContext {
       inputTypeInfos[i] = childTypeInfo;
       inputDataTypePhysicalVariations[i] = DataTypePhysicalVariation.NONE;   // Assume.
 
-      if (child instanceof ExprNodeGenericFuncDesc) {
+      if ((child instanceof ExprNodeGenericFuncDesc) || (child instanceof ExprNodeFieldDesc)) {
         VectorExpression vChild = getVectorExpression(child, childrenMode);
           children.add(vChild);
           arguments[i] = vChild.getOutputColumnNum();

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFStructField.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFStructField.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFStructField.java
new file mode 100644
index 0000000..0507fa5
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorUDFStructField.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.expressions;
+
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+
+/**
+ * Vectorized expression that extracts a field of a Struct type by field name and puts
+ * the result in an output column.
+ */
+public class VectorUDFStructField extends VectorExpression {
+
+  private int structColumnNum;
+  private int fieldIndex;
+
+  public VectorUDFStructField() {
+    super();
+  }
+
+  public VectorUDFStructField(int structColumnNum, int fieldIndex, int outputColumnNum) {
+    super(outputColumnNum);
+    this.structColumnNum = structColumnNum;
+    this.fieldIndex = fieldIndex;
+  }
+
+  @Override
+  public void evaluate(VectorizedRowBatch batch) {
+    if (childExpressions != null) {
+      super.evaluateChildren(batch);
+    }
+
+    ColumnVector outV = batch.cols[outputColumnNum];
+    StructColumnVector structColumnVector = (StructColumnVector) batch.cols[structColumnNum];
+    ColumnVector fieldColumnVector = structColumnVector.fields[fieldIndex];
+
+    outV.noNulls = true;
+    if (structColumnVector.isRepeating) {
+      if (structColumnVector.isNull[0]) {
+        outV.isNull[0] = true;
+        outV.noNulls = false;
+      } else {
+        outV.setElement(0, 0, fieldColumnVector);
+        outV.isNull[0] = false;
+      }
+      outV.isRepeating = true;
+    } else {
+      for (int i = 0; i < batch.size; i++) {
+        int j = (batch.selectedInUse) ? batch.selected[i] : i;
+        if (structColumnVector.isNull[j]) {
+          outV.isNull[j] = true;
+          outV.noNulls = false;
+        } else {
+          outV.setElement(j, j, fieldColumnVector);
+          outV.isNull[j] = false;
+        }
+      }
+      outV.isRepeating = false;
+    }
+  }
+
+  @Override
+  public String vectorExpressionParameters() {
+    return getColumnParamString(0, structColumnNum) + ", " + getColumnParamString(1, fieldIndex);
+  }
+
+  @Override
+  public VectorExpressionDescriptor.Descriptor getDescriptor() {
+    return (new VectorExpressionDescriptor.Builder())
+        .setMode(
+            VectorExpressionDescriptor.Mode.PROJECTION)
+        .setNumArguments(2)
+        .setArgumentTypes(
+            VectorExpressionDescriptor.ArgumentType.STRUCT,
+            VectorExpressionDescriptor.ArgumentType.INT_FAMILY)
+        .setInputExpressionTypes(
+            VectorExpressionDescriptor.InputExpressionType.COLUMN,
+            VectorExpressionDescriptor.InputExpressionType.SCALAR).build();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStructField.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStructField.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStructField.java
new file mode 100644
index 0000000..a668489
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStructField.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorUDFStructField;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+
+/**
+ * This class should not be used directly; it exists only to align the vectorized
+ * implementation of the struct-field access UDF with the GenericUDF framework.
+ */
+@Description(
+    name = "structField",
+    value = "The class shouldn't be used, and only to align the implementation" +
+        " of vectorization UDF for struct field")
+@VectorizedExpressions({VectorUDFStructField.class})
+public class GenericUDFStructField extends GenericUDF {
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    throw new UDFArgumentException("GenericUDFStructField shouldn't be used directly.");
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    throw new HiveException("GenericUDFStructField shouldn't be used directly.");
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    throw new RuntimeException("GenericUDFStructField shouldn't be used directly.");
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/test/queries/clientpositive/orc_struct_type_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_struct_type_vectorization.q b/ql/src/test/queries/clientpositive/orc_struct_type_vectorization.q
new file mode 100644
index 0000000..362ae77
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_struct_type_vectorization.q
@@ -0,0 +1,65 @@
+set hive.mapred.mode=nonstrict;
+set hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+
+DROP TABLE orc_struct_type_staging;
+DROP TABLE orc_struct_type;
+
+CREATE TABLE orc_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ',';
+
+CREATE TABLE orc_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS ORC;
+
+LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE orc_struct_type_staging;
+-- test data size < 1024
+INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1024;
+
+-- verify the row count
+select count(*) from orc_struct_type;
+-- test select all columns and fields
+explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10;
+select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10;
+-- test select fields only
+select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10;
+select st1.f1, st2.f1 from orc_struct_type limit 10;
+-- test complex select with list
+explain vectorization expression select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 limit 10;
+select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10;
+
+-- test data size = 1024
+INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1025;
+
+-- verify the row count
+select count(*) from orc_struct_type;
+-- test select all columns and fields
+select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10;
+-- test select fields only
+select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10;
+select st1.f1, st2.f1 from orc_struct_type limit 10;
+-- test complex select with list
+select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10;
+
+-- test data size = 1025
+INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1026;
+
+-- verify the row count
+select count(*) from orc_struct_type;
+-- test select all columns and fields
+select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10;
+-- test select fields only
+select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10;
+select st1.f1, st2.f1 from orc_struct_type limit 10;
+-- test complex select with list
+select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/test/queries/clientpositive/parquet_struct_type_vectorization.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_struct_type_vectorization.q b/ql/src/test/queries/clientpositive/parquet_struct_type_vectorization.q
new file mode 100644
index 0000000..9922b39
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/parquet_struct_type_vectorization.q
@@ -0,0 +1,65 @@
+set hive.mapred.mode=nonstrict;
+set hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+
+DROP TABLE parquet_struct_type_staging;
+DROP TABLE parquet_struct_type;
+
+CREATE TABLE parquet_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ',';
+
+CREATE TABLE parquet_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS PARQUET;
+
+LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE parquet_struct_type_staging;
+-- test data size < 1024
+INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1024;
+
+-- verify the row count
+select count(*) from parquet_struct_type;
+-- test select all columns and fields
+explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10;
+select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10;
+-- test select fields only
+select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10;
+select st1.f1, st2.f1 from parquet_struct_type limit 10;
+-- test complex select with list
+explain vectorization expression select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 limit 10;
+select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10;
+
+-- test data size = 1024
+INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1025;
+
+-- verify the row count
+select count(*) from parquet_struct_type;
+-- test select all columns and fields
+select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10;
+-- test select fields only
+select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10;
+select st1.f1, st2.f1 from parquet_struct_type limit 10;
+-- test complex select with list
+select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10;
+
+-- test data size = 1025
+INSERT OVERWRITE TABLE parquet_struct_type
+SELECT id, st1, st2 FROM parquet_struct_type_staging where id < 1026;
+
+-- verify the row count
+select count(*) from parquet_struct_type;
+-- test select all columns and fields
+select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from parquet_struct_type limit 10;
+-- test select fields only
+select st1.f1, st2.f1, st2.f3 from parquet_struct_type limit 10;
+select st1.f1, st2.f1 from parquet_struct_type limit 10;
+-- test complex select with list
+select sum(st1.f1), st1.f1 from parquet_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ac721836/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out b/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
new file mode 100644
index 0000000..4cd56f8
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/orc_struct_type_vectorization.q.out
@@ -0,0 +1,535 @@
+PREHOOK: query: DROP TABLE orc_struct_type_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_struct_type_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE orc_struct_type
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE orc_struct_type
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE orc_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ','
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_struct_type_staging
+POSTHOOK: query: CREATE TABLE orc_struct_type_staging (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) ROW FORMAT DELIMITED
+  FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ','
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_struct_type_staging
+PREHOOK: query: CREATE TABLE orc_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_struct_type
+POSTHOOK: query: CREATE TABLE orc_struct_type (
+id int,
+st1 struct<f1:int, f2:string>,
+st2 struct<f1:int, f3:string>
+) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_struct_type
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE orc_struct_type_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_struct_type_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/struct_type.txt' OVERWRITE INTO TABLE orc_struct_type_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_struct_type_staging
+PREHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1024
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type_staging
+PREHOOK: Output: default@orc_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1024
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type_staging
+POSTHOOK: Output: default@orc_struct_type
+POSTHOOK: Lineage: orc_struct_type.id SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st1 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st2 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from orc_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from orc_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1023
+PREHOOK: query: explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_struct_type
+                  Statistics: Num rows: 1023 Data size: 459256 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Select Operator
+                    expressions: st1 (type: struct<f1:int,f2:string>), st1.f1 (type: int), st1.f2 (type: string), st2 (type: struct<f1:int,f3:string>), st2.f1 (type: int), st2.f3 (type: string)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    Select Vectorization:
+                        className: VectorSelectOperator
+                        native: true
+                        projectedOutputColumnNums: [1, 4, 5, 2, 6, 7]
+                        selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int, VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 1:int) -> 5:string, VectorUDFStructField(col 2:struct<f1:int,f3:string>, col 0:int) -> 6:int, VectorUDFStructField(col 2:struct<f1:int,f3:string>, col 1:int) -> 7:string
+                    Statistics: Num rows: 1023 Data size: 459256 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 10
+                      Limit Vectorization:
+                          className: VectorLimitOperator
+                          native: true
+                      Statistics: Num rows: 10 Data size: 4480 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        File Sink Vectorization:
+                            className: VectorFileSinkOperator
+                            native: false
+                        Statistics: Num rows: 10 Data size: 4480 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: explain vectorization expression select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain vectorization expression select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 limit 10
+POSTHOOK: type: QUERY
+PLAN VECTORIZATION:
+  enabled: true
+  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: orc_struct_type
+                  Pruned Column Paths: st1.f1
+                  Statistics: Num rows: 1023 Data size: 229628 Basic stats: COMPLETE Column stats: NONE
+                  TableScan Vectorization:
+                      native: true
+                  Filter Operator
+                    Filter Vectorization:
+                        className: VectorFilterOperator
+                        native: true
+                        predicateExpression: FilterLongColGreaterLongScalar(col 4:int, val 500)(children: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int)
+                    predicate: (st1.f1 > 500) (type: boolean)
+                    Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: st1.f1 (type: int)
+                      outputColumnNames: _col0
+                      Select Vectorization:
+                          className: VectorSelectOperator
+                          native: true
+                          projectedOutputColumnNums: [4]
+                          selectExpressions: VectorUDFStructField(col 1:struct<f1:int,f2:string>, col 0:int) -> 4:int
+                      Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: sum(_col0)
+                        Group By Vectorization:
+                            aggregators: VectorUDAFSumLong(col 4:int) -> bigint
+                            className: VectorGroupByOperator
+                            groupByMode: HASH
+                            keyExpressions: col 4:int
+                            native: false
+                            vectorProcessingMode: HASH
+                            projectedOutputColumnNums: [0]
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0, _col1
+                        Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Reduce Sink Vectorization:
+                              className: VectorReduceSinkLongOperator
+                              native: true
+                              nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
+                          Statistics: Num rows: 341 Data size: 76542 Basic stats: COMPLETE Column stats: NONE
+                          TopN Hash Memory Usage: 0.1
+                          value expressions: _col1 (type: bigint)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+            Map Vectorization:
+                enabled: true
+                enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
+                inputFormatFeatureSupport: []
+                featureSupportInUse: []
+                inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+        Reducer 2 
+            Execution mode: vectorized, llap
+            Reduce Vectorization:
+                enabled: true
+                enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
+                allNative: false
+                usesVectorUDFAdaptor: false
+                vectorized: true
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: sum(VALUE._col0)
+                Group By Vectorization:
+                    aggregators: VectorUDAFSumLong(col 1:bigint) -> bigint
+                    className: VectorGroupByOperator
+                    groupByMode: MERGEPARTIAL
+                    keyExpressions: col 0:int
+                    native: false
+                    vectorProcessingMode: MERGE_PARTIAL
+                    projectedOutputColumnNums: [0]
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 170 Data size: 38158 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: bigint), _col0 (type: int)
+                  outputColumnNames: _col0, _col1
+                  Select Vectorization:
+                      className: VectorSelectOperator
+                      native: true
+                      projectedOutputColumnNums: [1, 0]
+                  Statistics: Num rows: 170 Data size: 38158 Basic stats: COMPLETE Column stats: NONE
+                  Limit
+                    Number of rows: 10
+                    Limit Vectorization:
+                        className: VectorLimitOperator
+                        native: true
+                    Statistics: Num rows: 10 Data size: 2240 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      File Sink Vectorization:
+                          className: VectorFileSinkOperator
+                          native: false
+                      Statistics: Num rows: 10 Data size: 2240 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510
+PREHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1025
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type_staging
+PREHOOK: Output: default@orc_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1025
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type_staging
+POSTHOOK: Output: default@orc_struct_type
+POSTHOOK: Lineage: orc_struct_type.id SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st1 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st2 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from orc_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from orc_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1024
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510
+PREHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1026
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type_staging
+PREHOOK: Output: default@orc_struct_type
+POSTHOOK: query: INSERT OVERWRITE TABLE orc_struct_type
+SELECT id, st1, st2 FROM orc_struct_type_staging where id < 1026
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type_staging
+POSTHOOK: Output: default@orc_struct_type
+POSTHOOK: Lineage: orc_struct_type.id SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:id, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st1 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st1, type:struct<f1:int,f2:string>, comment:null), ]
+POSTHOOK: Lineage: orc_struct_type.st2 SIMPLE [(orc_struct_type_staging)orc_struct_type_staging.FieldSchema(name:st2, type:struct<f1:int,f3:string>, comment:null), ]
+PREHOOK: query: select count(*) from orc_struct_type
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select count(*) from orc_struct_type
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1025
+PREHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1, st1.f1, st1.f2, st2, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+{"f1":1,"f2":"str1"}	1	str1	{"f1":2001,"f3":"str2001"}	2001	str2001
+{"f1":2,"f2":"str2"}	2	str2	{"f1":2002,"f3":"str2002"}	2002	str2002
+{"f1":3,"f2":"str3"}	3	str3	{"f1":2003,"f3":"str2003"}	2003	str2003
+{"f1":4,"f2":"str4"}	4	str4	{"f1":2004,"f3":"str2004"}	2004	str2004
+{"f1":5,"f2":"str5"}	5	str5	{"f1":2005,"f3":"str2005"}	2005	str2005
+{"f1":6,"f2":"str6"}	6	str6	{"f1":2006,"f3":"str2006"}	2006	str2006
+{"f1":7,"f2":"str7"}	7	str7	{"f1":2007,"f3":"str2007"}	2007	str2007
+{"f1":8,"f2":"str8"}	8	str8	{"f1":2008,"f3":"str2008"}	2008	str2008
+{"f1":9,"f2":"str9"}	9	str9	{"f1":2009,"f3":"str2009"}	2009	str2009
+{"f1":10,"f2":"str10"}	10	str10	{"f1":2010,"f3":"str2010"}	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1, st2.f3 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001	str2001
+2	2002	str2002
+3	2003	str2003
+4	2004	str2004
+5	2005	str2005
+6	2006	str2006
+7	2007	str2007
+8	2008	str2008
+9	2009	str2009
+10	2010	str2010
+PREHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select st1.f1, st2.f1 from orc_struct_type limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+1	2001
+2	2002
+3	2003
+4	2004
+5	2005
+6	2006
+7	2007
+8	2008
+9	2009
+10	2010
+PREHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_struct_type
+PREHOOK: Output: hdfs://### HDFS PATH ###
+POSTHOOK: query: select sum(st1.f1), st1.f1 from orc_struct_type where st1.f1 > 500 group by st1.f1 order by st1.f1 limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_struct_type
+POSTHOOK: Output: hdfs://### HDFS PATH ###
+501	501
+502	502
+503	503
+504	504
+505	505
+506	506
+507	507
+508	508
+509	509
+510	510