Posted to commits@hive.apache.org by ga...@apache.org on 2018/01/18 17:55:40 UTC

[20/70] [abbrv] hive git commit: HIVE-18416: Initial support for TABLE function (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
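
Context for the golden-file changes below: before this patch, Hive planned
INSERT INTO ... VALUES by materializing the rows into a temporary table
(values__tmp__table__N) and scanning it. With HIVE-18416, the VALUES clause
is instead compiled into the inline() UDTF applied to an array of constant
structs scanned from the single-row _dummy_table, which is why every plan
below swaps the temp-table TableScan for a _dummy_table scan plus a UDTF
Operator, and why the lineage entries change from EXPRESSION/SIMPLE over the
temp table to SCRIPT []. A rough HiveQL sketch of the rewrite (illustrative
only; the rewrite happens inside the planner, not as user-visible SQL):

    -- Statement as written by the user:
    --   INSERT INTO TABLE partitioned1 PARTITION (part=1)
    --   VALUES (1, 'original'), (2, 'original');
    --
    -- Plan shape after HIVE-18416, roughly equivalent to:
    INSERT INTO TABLE partitioned1 PARTITION (part=1)
    SELECT inline(array(struct(1, 'original'), struct(2, 'original')));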

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
index db5dd86..6b29dad 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
@@ -26,50 +26,58 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: values__tmp__table__1
-            Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+            alias: _dummy_table
+            Row Limit Per Split: 1
+            Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.partitioned1
-              Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
-                outputColumnNames: a, b, part
-                Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
-                  keys: part (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+              expressions: array(const struct(1,'original'),const struct(2,'original'),const struct(3,'original'),const struct(4,'original')) (type: array<struct<col1:int,col2:string>>)
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
+              UDTF Operator
+                Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
+                function name: inline
+                Select Operator
+                  expressions: col1 (type: int), col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.partitioned1
+                  Select Operator
+                    expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
+                    outputColumnNames: a, b, part
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                      keys: part (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -136,13 +144,15 @@ STAGE PLANS:
 
 PREHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@partitioned1@part=1
 POSTHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@partitioned1@part=1
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SCRIPT []
+col1	col2
 PREHOOK: query: desc formatted partitioned1 partition(part=1)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@partitioned1
@@ -266,50 +276,58 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: values__tmp__table__3
-            Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+            alias: _dummy_table
+            Row Limit Per Split: 1
+            Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string), UDFToInteger(tmp_values_col3) (type: int), tmp_values_col4 (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.partitioned1
-              Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), UDFToInteger('2') (type: int)
-                outputColumnNames: a, b, c, d, part
-                Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll'), compute_stats(d, 'hll')
-                  keys: part (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                  Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+              expressions: array(const struct(1,'new',10,'ten'),const struct(2,'new',20,'twenty'),const struct(3,'new',30,'thirty'),const struct(4,'new',40,'forty')) (type: array<struct<col1:int,col2:string,col3:int,col4:string>>)
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
+              UDTF Operator
+                Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
+                function name: inline
+                Select Operator
+                  expressions: col1 (type: int), col2 (type: string), col3 (type: int), col4 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.partitioned1
+                  Select Operator
+                    expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), UDFToInteger('2') (type: int)
+                    outputColumnNames: a, b, c, d, part
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll'), compute_stats(d, 'hll')
+                      keys: part (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Statistics: Num rows: 1 Data size: 1732 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 1732 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3)
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 1764 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 1764 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 600 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 1764 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -376,15 +394,17 @@ STAGE PLANS:
 
 PREHOOK: query: insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@partitioned1@part=2
 POSTHOOK: query: insert into table partitioned1 partition(part=2) values(1, 'new', 10, 'ten'),(2, 'new', 20, 'twenty'), (3, 'new', 30, 'thirty'),(4, 'new', 40, 'forty')
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@partitioned1@part=2
-POSTHOOK: Lineage: partitioned1 PARTITION(part=2).a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=2).b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=2).c EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=2).d SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-_col0	_col1	_col2	_col3
+POSTHOOK: Lineage: partitioned1 PARTITION(part=2).a SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=2).b SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=2).c SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=2).d SCRIPT []
+col1	col2	col3	col4
 PREHOOK: query: desc formatted partitioned1 partition(part=2)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@partitioned1
@@ -464,50 +484,58 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: values__tmp__table__5
-            Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
+            alias: _dummy_table
+            Row Limit Per Split: 1
+            Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string), UDFToInteger(tmp_values_col3) (type: int), tmp_values_col4 (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.partitioned1
-              Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), UDFToInteger('1') (type: int)
-                outputColumnNames: a, b, c, d, part
-                Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll'), compute_stats(d, 'hll')
-                  keys: part (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                  Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+              expressions: array(const struct(5,'new',100,'hundred'),const struct(6,'new',200,'two hundred')) (type: array<struct<col1:int,col2:string,col3:int,col4:string>>)
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+              UDTF Operator
+                Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE
+                function name: inline
+                Select Operator
+                  expressions: col1 (type: int), col2 (type: string), col3 (type: int), col4 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.partitioned1
+                  Select Operator
+                    expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), UDFToInteger('1') (type: int)
+                    outputColumnNames: a, b, c, d, part
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll'), compute_stats(c, 'hll'), compute_stats(d, 'hll')
+                      keys: part (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Statistics: Num rows: 1 Data size: 1732 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 1732 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3)
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 1764 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 1764 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 400 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 1764 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -574,15 +602,17 @@ STAGE PLANS:
 
 PREHOOK: query: insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@partitioned1@part=1
 POSTHOOK: query: insert into table partitioned1 partition(part=1) values(5, 'new', 100, 'hundred'),(6, 'new', 200, 'two hundred')
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@partitioned1@part=1
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).c EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).d SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
-_col0	_col1	_col2	_col3
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).c SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).d SCRIPT []
+col1	col2	col3	col4
 PREHOOK: query: desc formatted partitioned1 partition(part=1)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@partitioned1

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out
index 40548d0..44f4c71 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_5a.q.out
@@ -28,98 +28,106 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: values__tmp__table__1
-            Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
+            alias: _dummy_table
+            Row Limit Per Split: 1
+            Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
             GatherStats: false
             Select Operator
-              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: part=1/
-                Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      column.name.delimiter ,
-                      columns a,b
-                      columns.comments 
-                      columns.types int:string
-#### A masked pattern was here ####
-                      name default.partitioned1
-                      partition_columns.types int
-                      serialization.ddl struct partitioned1 { i32 a, string b}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.partitioned1
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-              Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
-                outputColumnNames: a, b, part
-                Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
-                  keys: part (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    null sort order: a
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
-                    tag: -1
-                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
-                    auto parallelism: false
+              expressions: array(const struct(1,'original')) (type: array<struct<col1:int,col2:string>>)
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+              UDTF Operator
+                Statistics: Num rows: 1 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                function name: inline
+                Select Operator
+                  expressions: col1 (type: int), col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Static Partition Specification: part=1/
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          bucket_count -1
+                          column.name.delimiter ,
+                          columns a,b
+                          columns.comments 
+                          columns.types int:string
+#### A masked pattern was here ####
+                          name default.partitioned1
+                          partition_columns.types int
+                          serialization.ddl struct partitioned1 { i32 a, string b}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.partitioned1
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+                  Select Operator
+                    expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
+                    outputColumnNames: a, b, part
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                      keys: part (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: -1
+                        value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+                        auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: Values__Tmp__Table__1
-            input format: org.apache.hadoop.mapred.TextInputFormat
+            base file name: dummy_path
+            input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
               bucket_count -1
               column.name.delimiter ,
-              columns tmp_values_col1,tmp_values_col2
+              columns 
               columns.comments 
-              columns.types string:string
+              columns.types 
 #### A masked pattern was here ####
-              name default.values__tmp__table__1
-              serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2}
+              name _dummy_database._dummy_table
+              serialization.ddl struct _dummy_table { }
               serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+            serde: org.apache.hadoop.hive.serde2.NullStructSerDe
           
-              input format: org.apache.hadoop.mapred.TextInputFormat
+              input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
                 bucket_count -1
                 column.name.delimiter ,
-                columns tmp_values_col1,tmp_values_col2
+                columns 
                 columns.comments 
-                columns.types string:string
+                columns.types 
 #### A masked pattern was here ####
-                name default.values__tmp__table__1
-                serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2}
+                name _dummy_database._dummy_table
+                serialization.ddl struct _dummy_table { }
                 serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.values__tmp__table__1
-            name: default.values__tmp__table__1
+                serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+              serde: org.apache.hadoop.hive.serde2.NullStructSerDe
+              name: _dummy_database._dummy_table
+            name: _dummy_database._dummy_table
       Truncated Path -> Alias:
 #### A masked pattern was here ####
       Needs Tagging: false
@@ -129,17 +137,17 @@ STAGE PLANS:
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 1 Data size: 110 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -362,13 +370,15 @@ STAGE PLANS:
 
 PREHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original')
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@partitioned1@part=1
 POSTHOOK: query: insert into table partitioned1 partition(part=1) values(1, 'original')
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@partitioned1@part=1
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SCRIPT []
+col1	col2
 PREHOOK: query: desc formatted partitioned1 partition(part=1)
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@partitioned1
@@ -429,98 +439,106 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: values__tmp__table__3
-            Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+            alias: _dummy_table
+            Row Limit Per Split: 1
+            Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
             GatherStats: false
             Select Operator
-              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: part=1/
-                Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      column.name.delimiter ,
-                      columns a,b
-                      columns.comments 
-                      columns.types int:string
-#### A masked pattern was here ####
-                      name default.partitioned1
-                      partition_columns.types int
-                      serialization.ddl struct partitioned1 { i32 a, string b}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.partitioned1
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-              Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
-                outputColumnNames: a, b, part
-                Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
-                  keys: part (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    null sort order: a
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
-                    tag: -1
-                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
-                    auto parallelism: false
+              expressions: array(const struct(2,'original'),const struct(3,'original'),const struct(4,'original')) (type: array<struct<col1:int,col2:string>>)
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
+              UDTF Operator
+                Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
+                function name: inline
+                Select Operator
+                  expressions: col1 (type: int), col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Static Partition Specification: part=1/
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          bucket_count -1
+                          column.name.delimiter ,
+                          columns a,b
+                          columns.comments 
+                          columns.types int:string
+#### A masked pattern was here ####
+                          name default.partitioned1
+                          partition_columns.types int
+                          serialization.ddl struct partitioned1 { i32 a, string b}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.partitioned1
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
+                  Select Operator
+                    expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
+                    outputColumnNames: a, b, part
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                      keys: part (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE
+                        tag: -1
+                        value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+                        auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
 #### A masked pattern was here ####
           Partition
-            base file name: Values__Tmp__Table__3
-            input format: org.apache.hadoop.mapred.TextInputFormat
+            base file name: dummy_path
+            input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
               bucket_count -1
               column.name.delimiter ,
-              columns tmp_values_col1,tmp_values_col2
+              columns 
               columns.comments 
-              columns.types string:string
+              columns.types 
 #### A masked pattern was here ####
-              name default.values__tmp__table__3
-              serialization.ddl struct values__tmp__table__3 { string tmp_values_col1, string tmp_values_col2}
+              name _dummy_database._dummy_table
+              serialization.ddl struct _dummy_table { }
               serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+            serde: org.apache.hadoop.hive.serde2.NullStructSerDe
           
-              input format: org.apache.hadoop.mapred.TextInputFormat
+              input format: org.apache.hadoop.hive.ql.io.NullRowsInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
                 bucket_count -1
                 column.name.delimiter ,
-                columns tmp_values_col1,tmp_values_col2
+                columns 
                 columns.comments 
-                columns.types string:string
+                columns.types 
 #### A masked pattern was here ####
-                name default.values__tmp__table__3
-                serialization.ddl struct values__tmp__table__3 { string tmp_values_col1, string tmp_values_col2}
+                name _dummy_database._dummy_table
+                serialization.ddl struct _dummy_table { }
                 serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.values__tmp__table__3
-            name: default.values__tmp__table__3
+                serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+              serde: org.apache.hadoop.hive.serde2.NullStructSerDe
+              name: _dummy_database._dummy_table
+            name: _dummy_database._dummy_table
       Truncated Path -> Alias:
 #### A masked pattern was here ####
       Needs Tagging: false
@@ -530,17 +548,17 @@ STAGE PLANS:
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 1 Data size: 330 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -763,13 +781,15 @@ STAGE PLANS:
 
 PREHOOK: query: insert into table partitioned1 partition(part=1) values(2, 'original'), (3, 'original'),(4, 'original')
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@partitioned1@part=1
 POSTHOOK: query: insert into table partitioned1 partition(part=1) values(2, 'original'), (3, 'original'),(4, 'original')
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@partitioned1@part=1
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SIMPLE [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-_col0	_col1
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).a SCRIPT []
+POSTHOOK: Lineage: partitioned1 PARTITION(part=1).b SCRIPT []
+col1	col2
 PREHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
 PREHOOK: type: QUERY
 POSTHOOK: query: explain insert into table partitioned1 partition(part=1) values(1, 'original'),(2, 'original'), (3, 'original'),(4, 'original')
@@ -790,50 +810,58 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: values__tmp__table__5
-            Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+            alias: _dummy_table
+            Row Limit Per Split: 1
+            Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              expressions: UDFToInteger(tmp_values_col1) (type: int), tmp_values_col2 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.partitioned1
-              Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
-                outputColumnNames: a, b, part
-                Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
-                  keys: part (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
+              expressions: array(const struct(1,'original'),const struct(2,'original'),const struct(3,'original'),const struct(4,'original')) (type: array<struct<col1:int,col2:string>>)
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
+              UDTF Operator
+                Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
+                function name: inline
+                Select Operator
+                  expressions: col1 (type: int), col2 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.partitioned1
+                  Select Operator
+                    expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
+                    outputColumnNames: a, b, part
+                    Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      aggregations: compute_stats(a, 'hll'), compute_stats(b, 'hll')
+                      keys: part (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2
+                      Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1 Data size: 868 Basic stats: COMPLETE Column stats: COMPLETE
+                        value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:binary>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
           keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:binary>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 440 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 884 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

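The plan diff above is the heart of HIVE-18416: an INSERT ... VALUES no longer materializes a values__tmp__table__N scratch table. The literal rows are folded into a constant array of structs, scanned off the single-row _dummy_table and expanded by the inline UDTF. A minimal sketch of the rewrite, using a hypothetical table t (the rewritten form below is conceptual, not literal EXPLAIN output):

    -- hypothetical table for illustration
    create table t (a int, b string);

    -- the statement as written in the test
    insert into t values (1, 'original'), (2, 'original');

    -- is now compiled roughly as
    insert into table t
    select inline(array(struct(1, 'original'), struct(2, 'original')));
    -- inline() expands array<struct<col1:int,col2:string>> into rows,
    -- read from the one-row _dummy_table seen in the TableScan above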
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/avrotblsjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avrotblsjoin.q.out b/ql/src/test/results/clientpositive/avrotblsjoin.q.out
index d0170a3..a686235 100644
--- a/ql/src/test/results/clientpositive/avrotblsjoin.q.out
+++ b/ql/src/test/results/clientpositive/avrotblsjoin.q.out
@@ -52,21 +52,25 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table1_1
 PREHOOK: query: insert into table1 values ("1", "2", "3")
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@table1
 POSTHOOK: query: insert into table1 values ("1", "2", "3")
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@table1
-POSTHOOK: Lineage: table1.col1 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1.col2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: table1.col3 SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: table1.col1 SCRIPT []
+POSTHOOK: Lineage: table1.col2 SCRIPT []
+POSTHOOK: Lineage: table1.col3 SCRIPT []
 PREHOOK: query: insert into table1_1 values (1, "2")
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@table1_1
 POSTHOOK: query: insert into table1_1 values (1, "2")
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@table1_1
-POSTHOOK: Lineage: table1_1.col1 EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: table1_1.col2 SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: table1_1.col1 SCRIPT []
+POSTHOOK: Lineage: table1_1.col2 SCRIPT []
 WARNING: Comparing a bigint and a string may result in a loss of precision.
 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select table1.col1, table1_1.* from table1 join table1_1 on table1.col1=table1_1.col1 where table1_1.col1="1"

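From here on, the golden-file churn is uniform: every VALUES insert gains _dummy_database@_dummy_table as a hook input, and its lineage collapses from EXPRESSION/SIMPLE over values__tmp__table__N columns to a bare SCRIPT [], since the inserted values are now constants in the plan rather than columns of a scanned temp table. A quick way to observe this, assuming any one-column table u:

    create table u (i int);
    explain insert into u values (1);
    -- the TableScan alias in the plan now reads _dummy_table; the
    -- PREHOOK/POSTHOOK Input lines in these .q.out files record the same
    -- table as _dummy_database@_dummy_table via the test harness hooks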
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/basicstat_partval.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/basicstat_partval.q.out b/ql/src/test/results/clientpositive/basicstat_partval.q.out
index 1e46d58..ac75365 100644
--- a/ql/src/test/results/clientpositive/basicstat_partval.q.out
+++ b/ql/src/test/results/clientpositive/basicstat_partval.q.out
@@ -8,18 +8,22 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@p1
 PREHOOK: query: insert into p1 partition(p='a') values (1)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@p1@p=a
 POSTHOOK: query: insert into p1 partition(p='a') values (1)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@p1@p=a
-POSTHOOK: Lineage: p1 PARTITION(p=a).i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: p1 PARTITION(p=a).i SCRIPT []
 PREHOOK: query: insert into p1 partition(p='A') values (2),(3)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@p1@p=A
 POSTHOOK: query: insert into p1 partition(p='A') values (2),(3)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@p1@p=A
-POSTHOOK: Lineage: p1 PARTITION(p=A).i EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: p1 PARTITION(p=A).i SCRIPT []
 PREHOOK: query: describe formatted p1
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@p1

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/beeline/mapjoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/mapjoin2.q.out b/ql/src/test/results/clientpositive/beeline/mapjoin2.q.out
index 7e70841..f178742 100644
--- a/ql/src/test/results/clientpositive/beeline/mapjoin2.q.out
+++ b/ql/src/test/results/clientpositive/beeline/mapjoin2.q.out
@@ -8,20 +8,24 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tbl
 PREHOOK: query: insert into tbl values (1, 'one')
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@tbl
 POSTHOOK: query: insert into tbl values (1, 'one')
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@tbl
-POSTHOOK: Lineage: tbl.n EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: tbl.t SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: tbl.n SCRIPT []
+POSTHOOK: Lineage: tbl.t SCRIPT []
 PREHOOK: query: insert into tbl values(2, 'two')
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@tbl
 POSTHOOK: query: insert into tbl values(2, 'two')
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@tbl
-POSTHOOK: Lineage: tbl.n EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: tbl.t SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: tbl.n SCRIPT []
+POSTHOOK: Lineage: tbl.t SCRIPT []
 Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: select a.n, a.t, isnull(b.n), isnull(b.t) from (select * from tbl where n = 1) a  left outer join  (select * from tbl where 1 = 2) b on a.n = b.n
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
index 5541aeb..15c51e1 100644
--- a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
+++ b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
@@ -13,6 +13,7 @@ PREHOOK: query: insert into cmv_basetable values
  (3, 'calvin', 978.76, 3),
  (3, 'charlie', 9.8, 1)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@cmv_basetable
 POSTHOOK: query: insert into cmv_basetable values
  (1, 'alfred', 10.30, 2),
@@ -21,11 +22,12 @@ POSTHOOK: query: insert into cmv_basetable values
  (3, 'calvin', 978.76, 3),
  (3, 'charlie', 9.8, 1)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@cmv_basetable
-POSTHOOK: Lineage: cmv_basetable.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: cmv_basetable.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: cmv_basetable.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: cmv_basetable.d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ]
+POSTHOOK: Lineage: cmv_basetable.a SCRIPT []
+POSTHOOK: Lineage: cmv_basetable.b SCRIPT []
+POSTHOOK: Lineage: cmv_basetable.c SCRIPT []
+POSTHOOK: Lineage: cmv_basetable.d SCRIPT []
 PREHOOK: query: analyze table cmv_basetable compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cmv_basetable

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/bucket_num_reducers_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_num_reducers_acid.q.out b/ql/src/test/results/clientpositive/bucket_num_reducers_acid.q.out
index 8ea23d7..81ba5ae 100644
--- a/ql/src/test/results/clientpositive/bucket_num_reducers_acid.q.out
+++ b/ql/src/test/results/clientpositive/bucket_num_reducers_acid.q.out
@@ -12,9 +12,11 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket_nr_acid
 PREHOOK: query: insert into bucket_nr_acid values(1,1)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@bucket_nr_acid
 PREHOOK: query: insert into bucket_nr_acid values(0,0),(3,3)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@bucket_nr_acid
 PREHOOK: query: update bucket_nr_acid set b = -1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/bucket_num_reducers_acid2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_num_reducers_acid2.q.out b/ql/src/test/results/clientpositive/bucket_num_reducers_acid2.q.out
index cabb4f7..94a2884 100644
--- a/ql/src/test/results/clientpositive/bucket_num_reducers_acid2.q.out
+++ b/ql/src/test/results/clientpositive/bucket_num_reducers_acid2.q.out
@@ -12,15 +12,19 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@bucket_nr_acid2
 PREHOOK: query: insert into bucket_nr_acid2 values(0,1),(1,1)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@bucket_nr_acid2
 PREHOOK: query: insert into bucket_nr_acid2 values(2,2),(3,2)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@bucket_nr_acid2
 PREHOOK: query: insert into bucket_nr_acid2 values(0,3),(1,3)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@bucket_nr_acid2
 PREHOOK: query: insert into bucket_nr_acid2 values(2,4),(3,4)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@bucket_nr_acid2
 PREHOOK: query: update bucket_nr_acid2 set b = -1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/cbo_rp_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_insert.q.out b/ql/src/test/results/clientpositive/cbo_rp_insert.q.out
deleted file mode 100644
index fde7915..0000000
--- a/ql/src/test/results/clientpositive/cbo_rp_insert.q.out
+++ /dev/null
@@ -1,83 +0,0 @@
-PREHOOK: query: drop database if exists x314 cascade
-PREHOOK: type: DROPDATABASE
-POSTHOOK: query: drop database if exists x314 cascade
-POSTHOOK: type: DROPDATABASE
-PREHOOK: query: create database x314
-PREHOOK: type: CREATEDATABASE
-PREHOOK: Output: database:x314
-POSTHOOK: query: create database x314
-POSTHOOK: type: CREATEDATABASE
-POSTHOOK: Output: database:x314
-PREHOOK: query: use x314
-PREHOOK: type: SWITCHDATABASE
-PREHOOK: Input: database:x314
-POSTHOOK: query: use x314
-POSTHOOK: type: SWITCHDATABASE
-POSTHOOK: Input: database:x314
-PREHOOK: query: create table source(s1 int, s2 int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:x314
-PREHOOK: Output: x314@source
-POSTHOOK: query: create table source(s1 int, s2 int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:x314
-POSTHOOK: Output: x314@source
-PREHOOK: query: create table target1(x int, y int, z int)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:x314
-PREHOOK: Output: x314@target1
-POSTHOOK: query: create table target1(x int, y int, z int)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:x314
-POSTHOOK: Output: x314@target1
-PREHOOK: query: insert into source(s2,s1) values(2,1)
-PREHOOK: type: QUERY
-PREHOOK: Output: x314@source
-POSTHOOK: query: insert into source(s2,s1) values(2,1)
-POSTHOOK: type: QUERY
-POSTHOOK: Output: x314@source
-POSTHOOK: Lineage: source.s1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: source.s2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-PREHOOK: query: select * from source
-PREHOOK: type: QUERY
-PREHOOK: Input: x314@source
-#### A masked pattern was here ####
-POSTHOOK: query: select * from source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: x314@source
-#### A masked pattern was here ####
-1	2
-PREHOOK: query: insert into target1(z,x) select * from source
-PREHOOK: type: QUERY
-PREHOOK: Input: x314@source
-PREHOOK: Output: x314@target1
-POSTHOOK: query: insert into target1(z,x) select * from source
-POSTHOOK: type: QUERY
-POSTHOOK: Input: x314@source
-POSTHOOK: Output: x314@target1
-POSTHOOK: Lineage: target1.x SIMPLE [(source)source.FieldSchema(name:s2, type:int, comment:null), ]
-POSTHOOK: Lineage: target1.y SIMPLE []
-POSTHOOK: Lineage: target1.z SIMPLE [(source)source.FieldSchema(name:s1, type:int, comment:null), ]
-PREHOOK: query: select * from target1
-PREHOOK: type: QUERY
-PREHOOK: Input: x314@target1
-#### A masked pattern was here ####
-POSTHOOK: query: select * from target1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: x314@target1
-#### A masked pattern was here ####
-2	NULL	1
-PREHOOK: query: drop database if exists x314 cascade
-PREHOOK: type: DROPDATABASE
-PREHOOK: Input: database:x314
-PREHOOK: Output: database:x314
-PREHOOK: Output: x314@source
-PREHOOK: Output: x314@target1
-PREHOOK: Output: x314@values__tmp__table__1
-POSTHOOK: query: drop database if exists x314 cascade
-POSTHOOK: type: DROPDATABASE
-POSTHOOK: Input: database:x314
-POSTHOOK: Output: database:x314
-POSTHOOK: Output: x314@source
-POSTHOOK: Output: x314@target1
-POSTHOOK: Output: x314@values__tmp__table__1

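The deleted output above also documents the old cleanup cost: each VALUES insert used to register a values__tmp__table__N in the current database, which is why dropping x314 with cascade listed x314@values__tmp__table__1 among its outputs. A sketch of the post-patch behavior, using the same objects as the removed test:

    create database x314;
    use x314;
    create table source (s1 int, s2 int);
    insert into source (s2, s1) values (2, 1);
    show tables;                 -- only 'source'; no values__tmp__table__N
    drop database x314 cascade;  -- no scratch table left to drop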
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/cmdwithcomments.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cmdwithcomments.q.out b/ql/src/test/results/clientpositive/cmdwithcomments.q.out
index 151a82c..a05522a 100644
--- a/ql/src/test/results/clientpositive/cmdwithcomments.q.out
+++ b/ql/src/test/results/clientpositive/cmdwithcomments.q.out
@@ -11,21 +11,25 @@ POSTHOOK: Output: default@numt
 PREHOOK: query: --comment2
 insert into numt values(1)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@numt
 POSTHOOK: query: --comment2
 insert into numt values(1)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@numt
-POSTHOOK: Lineage: numt.idx EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: numt.idx SCRIPT []
 PREHOOK: query: --comment3
 insert into numt values(2)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@numt
 POSTHOOK: query: --comment3
 insert into numt values(2)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@numt
-POSTHOOK: Lineage: numt.idx EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: numt.idx SCRIPT []
 PREHOOK: query: select idx from numt where --comment5
 idx = 1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
index ee0da96..1a4cf73 100644
--- a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
+++ b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
@@ -12,12 +12,14 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@calendar
 PREHOOK: query: insert into calendar values (2010, 10), (2011, 11), (2012, 12)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@calendar
 POSTHOOK: query: insert into calendar values (2010, 10), (2011, 11), (2012, 12)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@calendar
-POSTHOOK: Lineage: calendar.month EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: calendar.year EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: calendar.month SCRIPT []
+POSTHOOK: Lineage: calendar.year SCRIPT []
 PREHOOK: query: desc formatted calendar
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@calendar

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/column_names_with_leading_and_trailing_spaces.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/column_names_with_leading_and_trailing_spaces.q.out b/ql/src/test/results/clientpositive/column_names_with_leading_and_trailing_spaces.q.out
index 2b87d50..b327bf8 100644
--- a/ql/src/test/results/clientpositive/column_names_with_leading_and_trailing_spaces.q.out
+++ b/ql/src/test/results/clientpositive/column_names_with_leading_and_trailing_spaces.q.out
@@ -62,13 +62,15 @@ comment             	from deserializer
 COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\" left\":\"true\",\" middle \":\"true\",\"right \":\"true\"}}	 	 	 	 	 	 	 	 	 	 
 PREHOOK: query: insert into space values ("1", "2", "3")
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@space
 POSTHOOK: query: insert into space values ("1", "2", "3")
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@space
-POSTHOOK: Lineage: space. left SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: space. middle  SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: space.right  SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
+POSTHOOK: Lineage: space. left SCRIPT []
+POSTHOOK: Lineage: space. middle  SCRIPT []
+POSTHOOK: Lineage: space.right  SCRIPT []
 PREHOOK: query: desc formatted space ` left`
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@space
@@ -99,12 +101,14 @@ POSTHOOK: Input: default@space
 1	2	3
 PREHOOK: query: insert into space (` middle `) values("2")
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@space
 POSTHOOK: query: insert into space (` middle `) values("2")
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@space
 POSTHOOK: Lineage: space. left SIMPLE []
-POSTHOOK: Lineage: space. middle  SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: space. middle  SCRIPT []
 POSTHOOK: Lineage: space.right  SIMPLE []
 PREHOOK: query: select * from space order by ` left`
 PREHOOK: type: QUERY

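The second hunk above covers an insert that names only one target column; Hive fills the omitted columns with NULL, so their lineage stays SIMPLE [] while the supplied column becomes SCRIPT []. A sketch with a hypothetical table p (plain column names, to sidestep the space-quoting this test exercises):

    create table p (x string, y string, z string);
    insert into p (y) values ('2');
    select * from p;   -- NULL   2   NULL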
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/comma_in_column_name.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/comma_in_column_name.q.out b/ql/src/test/results/clientpositive/comma_in_column_name.q.out
index f8e319d..43357c0 100644
--- a/ql/src/test/results/clientpositive/comma_in_column_name.q.out
+++ b/ql/src/test/results/clientpositive/comma_in_column_name.q.out
@@ -8,11 +8,13 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test
 PREHOOK: query: insert into test values (1),(2)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@test
 POSTHOOK: query: insert into test values (1),(2)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@test
-POSTHOOK: Lineage: test.x,y EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: test.x,y SCRIPT []
 PREHOOK: query: select `x,y` from test where `x,y` >=2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test
@@ -40,11 +42,13 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test
 PREHOOK: query: insert into test values (1),(2)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@test
 POSTHOOK: query: insert into test values (1),(2)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@test
-POSTHOOK: Lineage: test.x,y EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: test.x,y SCRIPT []
 PREHOOK: query: select `x,y` from test where `x,y` <2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@test

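comma_in_column_name applies the same lineage shift to a column literally named x,y; backticks are what make such an identifier usable in the first place (with the default hive.support.quoted.identifiers=column). A minimal sketch:

    create table test (`x,y` int);
    insert into test values (1), (2);
    select `x,y` from test where `x,y` >= 2;   -- returns 2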
http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/concat_op.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/concat_op.q.out b/ql/src/test/results/clientpositive/concat_op.q.out
index 33c84f9..e260518 100644
--- a/ql/src/test/results/clientpositive/concat_op.q.out
+++ b/ql/src/test/results/clientpositive/concat_op.q.out
@@ -148,18 +148,22 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ct2
 PREHOOK: query: insert into ct1 values (7),(5),(3),(1)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@ct1
 POSTHOOK: query: insert into ct1 values (7),(5),(3),(1)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@ct1
-POSTHOOK: Lineage: ct1.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: ct1.c SCRIPT []
 PREHOOK: query: insert into ct2 values (8),(6),(4),(2)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@ct2
 POSTHOOK: query: insert into ct2 values (8),(6),(4),(2)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@ct2
-POSTHOOK: Lineage: ct2.c EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: ct2.c SCRIPT []
 PREHOOK: query: create view ct_v1 as select * from ct1 union all select * from ct2
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@ct1

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/constantPropWhen.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constantPropWhen.q.out b/ql/src/test/results/clientpositive/constantPropWhen.q.out
index 4f4bf80..c03a824 100644
--- a/ql/src/test/results/clientpositive/constantPropWhen.q.out
+++ b/ql/src/test/results/clientpositive/constantPropWhen.q.out
@@ -12,12 +12,14 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test_1
 PREHOOK: query: insert into table test_1 values (123, NULL), (NULL, NULL), (NULL, 123), (123, 123)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@test_1
 POSTHOOK: query: insert into table test_1 values (123, NULL), (NULL, NULL), (NULL, 123), (123, 123)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@test_1
-POSTHOOK: Lineage: test_1.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: test_1.id2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: test_1.id SCRIPT []
+POSTHOOK: Lineage: test_1.id2 SCRIPT []
 PREHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain SELECT cast(CASE WHEN id = id2 THEN FALSE ELSE TRUE END AS BOOLEAN) AS b FROM test_1

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/constantfolding.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constantfolding.q.out b/ql/src/test/results/clientpositive/constantfolding.q.out
index f9a9d24..c4fce25 100644
--- a/ql/src/test/results/clientpositive/constantfolding.q.out
+++ b/ql/src/test/results/clientpositive/constantfolding.q.out
@@ -60,34 +60,42 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@union_all_bug_test_2
 PREHOOK: query: insert into table union_all_bug_test_1 values (1,1)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@union_all_bug_test_1
 POSTHOOK: query: insert into table union_all_bug_test_1 values (1,1)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@union_all_bug_test_1
-POSTHOOK: Lineage: union_all_bug_test_1.f1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: union_all_bug_test_1.f2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: union_all_bug_test_1.f1 SCRIPT []
+POSTHOOK: Lineage: union_all_bug_test_1.f2 SCRIPT []
 PREHOOK: query: insert into table union_all_bug_test_2 values (1)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@union_all_bug_test_2
 POSTHOOK: query: insert into table union_all_bug_test_2 values (1)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@union_all_bug_test_2
-POSTHOOK: Lineage: union_all_bug_test_2.f1 EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: union_all_bug_test_2.f1 SCRIPT []
 PREHOOK: query: insert into table union_all_bug_test_1 values (0,0)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@union_all_bug_test_1
 POSTHOOK: query: insert into table union_all_bug_test_1 values (0,0)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@union_all_bug_test_1
-POSTHOOK: Lineage: union_all_bug_test_1.f1 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: union_all_bug_test_1.f2 EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: union_all_bug_test_1.f1 SCRIPT []
+POSTHOOK: Lineage: union_all_bug_test_1.f2 SCRIPT []
 PREHOOK: query: insert into table union_all_bug_test_2 values (0)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@union_all_bug_test_2
 POSTHOOK: query: insert into table union_all_bug_test_2 values (0)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@union_all_bug_test_2
-POSTHOOK: Lineage: union_all_bug_test_2.f1 EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: union_all_bug_test_2.f1 SCRIPT []
 PREHOOK: query: SELECT f1
 FROM (
 

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/cte_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cte_5.q.out b/ql/src/test/results/clientpositive/cte_5.q.out
index 3d7b324..1c7812e 100644
--- a/ql/src/test/results/clientpositive/cte_5.q.out
+++ b/ql/src/test/results/clientpositive/cte_5.q.out
@@ -20,12 +20,14 @@ POSTHOOK: Output: database:mydb
 POSTHOOK: Output: mydb@q1
 PREHOOK: query: insert into q1 values (5, 'A')
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: mydb@q1
 POSTHOOK: query: insert into q1 values (5, 'A')
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: mydb@q1
-POSTHOOK: Lineage: q1.colnum EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
-POSTHOOK: Lineage: q1.colstring SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+POSTHOOK: Lineage: q1.colnum SCRIPT []
+POSTHOOK: Lineage: q1.colstring SCRIPT []
 PREHOOK: query: use default
 PREHOOK: type: SWITCHDATABASE
 PREHOOK: Input: database:default
@@ -39,7 +41,6 @@ POSTHOOK: query: show tables in mydb
 POSTHOOK: type: SHOWTABLES
 POSTHOOK: Input: database:mydb
 q1
-values__tmp__table__1
 PREHOOK: query: show tables
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/7e64114d/ql/src/test/results/clientpositive/cte_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cte_7.q.out b/ql/src/test/results/clientpositive/cte_7.q.out
index 54b6841..6f82dd4 100644
--- a/ql/src/test/results/clientpositive/cte_7.q.out
+++ b/ql/src/test/results/clientpositive/cte_7.q.out
@@ -8,13 +8,15 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t
 PREHOOK: query: insert into t values (1,'hello','world'),(2,'bye',null)
 PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
 PREHOOK: Output: default@t
 POSTHOOK: query: insert into t values (1,'hello','world'),(2,'bye',null)
 POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: default@t
-POSTHOOK: Lineage: t.a SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
-POSTHOOK: Lineage: t.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-POSTHOOK: Lineage: t.i EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: t.a SCRIPT []
+POSTHOOK: Lineage: t.b SCRIPT []
+POSTHOOK: Lineage: t.i SCRIPT []
 PREHOOK: query: select * from t where t.b is null
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t