Posted to commits@hive.apache.org by se...@apache.org on 2018/07/18 18:52:29 UTC

[33/48] hive git commit: HIVE-19940: Push predicates with deterministic UDFs with RBO (Janaki Lahorani, reviewed by Vineet Garg, Naveen Gangam)
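
For context, a minimal sketch of the behavior these golden files exercise,
assuming the rule-based optimizer path (hive.cbo.enable=false): the cast()
UDFs in the partitioned views below are deterministic, so a predicate
written against a view's partition columns can be rewritten in terms of the
base table's partition columns and pushed into the TableScan, pruning
partitions. The table and view names match the new test below; the two SET
statements are illustrative settings, not part of the committed test.

    -- Force the RBO path and enable predicate pushdown (illustrative).
    SET hive.cbo.enable=false;
    SET hive.optimize.ppd=true;

    -- The casts are deterministic, so this predicate can be pushed past
    -- them down to testa's scan, reading only part1=US/part2=DEF/part3=200.
    EXPLAIN
    SELECT vcol1, vcol2, vcol3
    FROM viewDeterministicUDFA
    WHERE vpart1 = 'US' AND vpart2 = 'DEF' AND vpart3 = '200';

In the resulting plans (see ppd_deterministic_expr.q.out below), the pushed
predicate shows up as filterExpr on the TableScan and only the matching
partition appears among the PREHOOK/POSTHOOK inputs.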

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/ppd_deterministic_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_deterministic_expr.q.out b/ql/src/test/results/clientpositive/ppd_deterministic_expr.q.out
new file mode 100644
index 0000000..b96a0e2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/ppd_deterministic_expr.q.out
@@ -0,0 +1,553 @@
+PREHOOK: query: CREATE TABLE `testb`(
+   `cola` string COMMENT '',
+   `colb` string COMMENT '',
+   `colc` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+
+STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testb
+POSTHOOK: query: CREATE TABLE `testb`(
+   `cola` string COMMENT '',
+   `colb` string COMMENT '',
+   `colc` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+
+STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testb
+PREHOOK: query: CREATE TABLE `testa`(
+   `col1` string COMMENT '',
+   `col2` string COMMENT '',
+   `col3` string COMMENT '',
+   `col4` string COMMENT '',
+   `col5` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testa
+POSTHOOK: query: CREATE TABLE `testa`(
+   `col1` string COMMENT '',
+   `col2` string COMMENT '',
+   `col3` string COMMENT '',
+   `col4` string COMMENT '',
+   `col5` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testa
+PREHOOK: query: insert into testA partition (part1='US', part2='ABC', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testa@part1=US/part2=ABC/part3=123
+POSTHOOK: query: insert into testA partition (part1='US', part2='ABC', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testa@part1=US/part2=ABC/part3=123
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col1 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col2 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col3 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col4 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col5 SCRIPT []
+PREHOOK: query: insert into testA partition (part1='UK', part2='DEF', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testa@part1=UK/part2=DEF/part3=123
+POSTHOOK: query: insert into testA partition (part1='UK', part2='DEF', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testa@part1=UK/part2=DEF/part3=123
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col1 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col2 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col3 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col4 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col5 SCRIPT []
+PREHOOK: query: insert into testA partition (part1='US', part2='DEF', part3='200')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testa@part1=US/part2=DEF/part3=200
+POSTHOOK: query: insert into testA partition (part1='US', part2='DEF', part3='200')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testa@part1=US/part2=DEF/part3=200
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col1 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col2 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col3 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col4 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col5 SCRIPT []
+PREHOOK: query: insert into testA partition (part1='CA', part2='ABC', part3='300')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testa@part1=CA/part2=ABC/part3=300
+POSTHOOK: query: insert into testA partition (part1='CA', part2='ABC', part3='300')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testa@part1=CA/part2=ABC/part3=300
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col1 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col2 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col3 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col4 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col5 SCRIPT []
+PREHOOK: query: insert into testB partition (part1='CA', part2='ABC', part3='300')
+values ('600', '700', 'abc'), ('601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=CA/part2=ABC/part3=300
+POSTHOOK: query: insert into testB partition (part1='CA', part2='ABC', part3='300')
+values ('600', '700', 'abc'), ('601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=CA/part2=ABC/part3=300
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=300).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=300).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=300).colc SCRIPT []
+PREHOOK: query: insert into testB partition (part1='CA', part2='ABC', part3='400')
+values ( '600', '700', 'abc'), ( '601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=CA/part2=ABC/part3=400
+POSTHOOK: query: insert into testB partition (part1='CA', part2='ABC', part3='400')
+values ( '600', '700', 'abc'), ( '601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=CA/part2=ABC/part3=400
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=400).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=400).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=400).colc SCRIPT []
+PREHOOK: query: insert into testB partition (part1='UK', part2='PQR', part3='500')
+values ('600', '700', 'abc'), ('601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=UK/part2=PQR/part3=500
+POSTHOOK: query: insert into testB partition (part1='UK', part2='PQR', part3='500')
+values ('600', '700', 'abc'), ('601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=UK/part2=PQR/part3=500
+POSTHOOK: Lineage: testb PARTITION(part1=UK,part2=PQR,part3=500).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=UK,part2=PQR,part3=500).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=UK,part2=PQR,part3=500).colc SCRIPT []
+PREHOOK: query: insert into testB partition (part1='US', part2='DEF', part3='200')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=US/part2=DEF/part3=200
+POSTHOOK: query: insert into testB partition (part1='US', part2='DEF', part3='200')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=US/part2=DEF/part3=200
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=DEF,part3=200).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=DEF,part3=200).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=DEF,part3=200).colc SCRIPT []
+PREHOOK: query: insert into testB partition (part1='US', part2='PQR', part3='123')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=US/part2=PQR/part3=123
+POSTHOOK: query: insert into testB partition (part1='US', part2='PQR', part3='123')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=US/part2=PQR/part3=123
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=PQR,part3=123).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=PQR,part3=123).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=PQR,part3=123).colc SCRIPT []
+PREHOOK: query: create view viewDeterministicUDFA partitioned on (vpart1, vpart2, vpart3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testa
+where part1 in ('US', 'CA')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testa
+PREHOOK: Output: database:default
+PREHOOK: Output: default@viewDeterministicUDFA
+POSTHOOK: query: create view viewDeterministicUDFA partitioned on (vpart1, vpart2, vpart3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testa
+where part1 in ('US', 'CA')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testa
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@viewDeterministicUDFA
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol1 EXPRESSION [(testa)testa.FieldSchema(name:col1, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol2 EXPRESSION [(testa)testa.FieldSchema(name:col2, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol3 EXPRESSION [(testa)testa.FieldSchema(name:col3, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol4 EXPRESSION [(testa)testa.FieldSchema(name:col4, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol5 EXPRESSION [(testa)testa.FieldSchema(name:col5, type:string, comment:), ]
+PREHOOK: query: create view viewDeterministicUDFB partitioned on (vpart1, vpart2, vpart3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testb
+where part1 in ('US', 'CA')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testb
+PREHOOK: Output: database:default
+PREHOOK: Output: default@viewDeterministicUDFB
+POSTHOOK: query: create view viewDeterministicUDFB partitioned on (vpart1, vpart2, vpart3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testb
+where part1 in ('US', 'CA')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testb
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@viewDeterministicUDFB
+POSTHOOK: Lineage: viewDeterministicUDFB.vcola EXPRESSION [(testb)testb.FieldSchema(name:cola, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFB.vcolb EXPRESSION [(testb)testb.FieldSchema(name:colb, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFB.vcolc EXPRESSION [(testb)testb.FieldSchema(name:colc, type:string, comment:), ]
+PREHOOK: query: create view viewNoUDFA partitioned on (part1, part2, part3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ part1,
+ part2,
+ part3
+ from testa
+where part1 in ('US', 'CA')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testa
+PREHOOK: Output: database:default
+PREHOOK: Output: default@viewNoUDFA
+POSTHOOK: query: create view viewNoUDFA partitioned on (part1, part2, part3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ part1,
+ part2,
+ part3
+ from testa
+where part1 in ('US', 'CA')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testa
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@viewNoUDFA
+POSTHOOK: Lineage: viewNoUDFA.vcol1 EXPRESSION [(testa)testa.FieldSchema(name:col1, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFA.vcol2 EXPRESSION [(testa)testa.FieldSchema(name:col2, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFA.vcol3 EXPRESSION [(testa)testa.FieldSchema(name:col3, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFA.vcol4 EXPRESSION [(testa)testa.FieldSchema(name:col4, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFA.vcol5 EXPRESSION [(testa)testa.FieldSchema(name:col5, type:string, comment:), ]
+PREHOOK: query: create view viewNoUDFB partitioned on (part1, part2, part3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ part1,
+ part2,
+ part3
+ from testb
+where part1 in ('US', 'CA')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testb
+PREHOOK: Output: database:default
+PREHOOK: Output: default@viewNoUDFB
+POSTHOOK: query: create view viewNoUDFB partitioned on (part1, part2, part3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ part1,
+ part2,
+ part3
+ from testb
+where part1 in ('US', 'CA')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testb
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@viewNoUDFB
+POSTHOOK: Lineage: viewNoUDFB.vcola EXPRESSION [(testb)testb.FieldSchema(name:cola, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFB.vcolb EXPRESSION [(testb)testb.FieldSchema(name:colb, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFB.vcolc EXPRESSION [(testb)testb.FieldSchema(name:colc, type:string, comment:), ]
+PREHOOK: query: explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: testa
+            filterExpr: (part1) IN ('US', 'CA') (type: boolean)
+            Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( col1 AS decimal(38,18)) (type: decimal(38,18)), CAST( col2 AS decimal(38,18)) (type: decimal(38,18)), CAST( col3 AS decimal(38,18)) (type: decimal(38,18))
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: 'US' (type: char(2)), 'DEF' (type: char(3)), '200' (type: char(3))
+                sort order: +++
+                Map-reduce partition columns: 'US' (type: char(2)), 'DEF' (type: char(3)), '200' (type: char(3))
+                Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18))
+          TableScan
+            alias: testb
+            filterExpr: (part1) IN ('US', 'CA') (type: boolean)
+            Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( cola AS decimal(38,18)) (type: decimal(38,18)), CAST( colb AS decimal(38,18)) (type: decimal(38,18)), CAST( part1 AS CHAR(2)) (type: char(2)), CAST( part2 AS CHAR(3)) (type: char(3))
+              outputColumnNames: _col0, _col1, _col3, _col4
+              Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col3 (type: char(2)), _col4 (type: char(3)), '200' (type: char(3))
+                sort order: +++
+                Map-reduce partition columns: _col3 (type: char(2)), _col4 (type: char(3)), '200' (type: char(3))
+                Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18))
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col5 (type: char(2)), _col6 (type: char(3)), _col7 (type: char(3))
+            1 _col3 (type: char(2)), _col4 (type: char(3)), _col5 (type: char(3))
+          outputColumnNames: _col0, _col1, _col2, _col8, _col9
+          Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col8 (type: decimal(38,18)), _col9 (type: decimal(38,18))
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testa
+PREHOOK: Input: default@testa@part1=US/part2=DEF/part3=200
+PREHOOK: Input: default@testb
+PREHOOK: Input: default@testb@part1=US/part2=DEF/part3=200
+PREHOOK: Input: default@viewdeterministicudfa
+PREHOOK: Input: default@viewdeterministicudfb
+#### A masked pattern was here ####
+POSTHOOK: query: select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testa
+POSTHOOK: Input: default@testa@part1=US/part2=DEF/part3=200
+POSTHOOK: Input: default@testb
+POSTHOOK: Input: default@testb@part1=US/part2=DEF/part3=200
+POSTHOOK: Input: default@viewdeterministicudfa
+POSTHOOK: Input: default@viewdeterministicudfb
+#### A masked pattern was here ####
+12.341000000000000000	1001.000000000000000000	2001.000000000000000000	601.000000000000000000	701.000000000000000000
+12.341000000000000000	1001.000000000000000000	2001.000000000000000000	600.000000000000000000	700.000000000000000000
+12.340000000000000000	100.000000000000000000	200.000000000000000000	601.000000000000000000	701.000000000000000000
+12.340000000000000000	100.000000000000000000	200.000000000000000000	600.000000000000000000	700.000000000000000000
+PREHOOK: query: explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: testa
+            filterExpr: ((part1) IN ('US', 'CA') and (part1 = 'US') and (part2 = 'DEF') and (part3 = '200')) (type: boolean)
+            Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( col1 AS decimal(38,18)) (type: decimal(38,18)), CAST( col2 AS decimal(38,18)) (type: decimal(38,18)), CAST( col3 AS decimal(38,18)) (type: decimal(38,18))
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: 'US' (type: string), 'DEF' (type: string), '200' (type: string)
+                sort order: +++
+                Map-reduce partition columns: 'US' (type: string), 'DEF' (type: string), '200' (type: string)
+                Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18))
+          TableScan
+            alias: testb
+            filterExpr: ((part1) IN ('US', 'CA') and (part3 = '200') and part1 is not null and part2 is not null) (type: boolean)
+            Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( cola AS decimal(38,18)) (type: decimal(38,18)), CAST( colb AS decimal(38,18)) (type: decimal(38,18)), part1 (type: string), part2 (type: string)
+              outputColumnNames: _col0, _col1, _col3, _col4
+              Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col3 (type: string), _col4 (type: string), '200' (type: string)
+                sort order: +++
+                Map-reduce partition columns: _col3 (type: string), _col4 (type: string), '200' (type: string)
+                Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18))
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col5 (type: string), _col6 (type: string), _col7 (type: string)
+            1 _col3 (type: string), _col4 (type: string), _col5 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col8, _col9
+          Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col8 (type: decimal(38,18)), _col9 (type: decimal(38,18))
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testa
+PREHOOK: Input: default@testa@part1=US/part2=DEF/part3=200
+PREHOOK: Input: default@testb
+PREHOOK: Input: default@testb@part1=US/part2=DEF/part3=200
+PREHOOK: Input: default@viewnoudfa
+PREHOOK: Input: default@viewnoudfb
+#### A masked pattern was here ####
+POSTHOOK: query: select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testa
+POSTHOOK: Input: default@testa@part1=US/part2=DEF/part3=200
+POSTHOOK: Input: default@testb
+POSTHOOK: Input: default@testb@part1=US/part2=DEF/part3=200
+POSTHOOK: Input: default@viewnoudfa
+POSTHOOK: Input: default@viewnoudfb
+#### A masked pattern was here ####
+12.341000000000000000	1001.000000000000000000	2001.000000000000000000	601.000000000000000000	701.000000000000000000
+12.341000000000000000	1001.000000000000000000	2001.000000000000000000	600.000000000000000000	700.000000000000000000
+12.340000000000000000	100.000000000000000000	200.000000000000000000	601.000000000000000000	701.000000000000000000
+12.340000000000000000	100.000000000000000000	200.000000000000000000	600.000000000000000000	700.000000000000000000

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/ppd_udf_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_udf_col.q.out b/ql/src/test/results/clientpositive/ppd_udf_col.q.out
index 8d858f5..dfc2d04 100644
--- a/ql/src/test/results/clientpositive/ppd_udf_col.q.out
+++ b/ql/src/test/results/clientpositive/ppd_udf_col.q.out
@@ -366,3 +366,412 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: (key = 100) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = 100) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col2 <= 0.1) (type: boolean)
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: (key = 100) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = 100) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: ((_col2 <= 0.1) and (_col2 > 0.1)) (type: boolean)
+                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 20
+                      Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double), '4' (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: ((key = 100) and ((value * 10) <= 200.0D)) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((value * 10) <= 200.0D) and (key = 100)) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double), (value * 10) (type: double)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: (key = 100) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = 100) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col2 <= 0.1) (type: boolean)
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: (key = 100) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = 100) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col2 <= 0.1) (type: boolean)
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                    Filter Operator
+                      predicate: (_col1 > 0.1) (type: boolean)
+                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                      Limit
+                        Number of rows: 20
+                        Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                          table:
+                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key = 100) and false) (type: boolean)
+              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: false (type: boolean)
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double), '4' (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: ((key = 100) and ((value * 10) <= 200.0D)) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((value * 10) <= 200.0D) and (key = 100)) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double), (value * 10) (type: double)
+                outputColumnNames: _col0, _col2, _col3
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col3 <= 200.0D) (type: boolean)
+                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double), _col3 (type: double)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/union_offcbo.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_offcbo.q.out b/ql/src/test/results/clientpositive/union_offcbo.q.out
index ab0e394..ce27bf2 100644
--- a/ql/src/test/results/clientpositive/union_offcbo.q.out
+++ b/ql/src/test/results/clientpositive/union_offcbo.q.out
@@ -288,21 +288,18 @@ STAGE PLANS:
           outputColumnNames: _col8, _col9, _col10, _col12, _col13, _col16, _col17, _col18, _col19
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: (((NVL(_col8,-1) <> NVL(_col18,-1)) or (NVL(_col9,-1) <> NVL(_col19,-1))) and _col18 is not null) (type: boolean)
+            predicate: (((NVL(_col8,-1) <> NVL(_col18,-1)) or (NVL(_col9,-1) <> NVL(_col19,-1))) and (CASE WHEN ((_col18 is not null and _col8 is null and (_col12 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col18 is not null and _col8 is null and (_col12 <= '2016-02-05'))) THEN ('RET') WHEN (((_col18 = _col8) and (_col19 <> _col9))) THEN ('A_INS') ELSE ('NA') END <> 'RET') and _col18 is not null) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col10 (type: bigint), _col16 (type: string), _col17 (type: bigint), _col13 (type: string), _col18 (type: string), _col19 (type: string), CASE WHEN ((_col18 is not null and _col8 is null and (_col12 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col18 is not null and _col8 is null and (_col12 <= '2016-02-05'))) THEN ('RET') WHEN (((_col18 = _col8) and (_col19 <> _col9))) THEN ('A_INS') ELSE ('NA') END (type: string)
               outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col6, _col7
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Filter Operator
-                predicate: (_col7 <> 'RET') (type: boolean)
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -383,21 +380,18 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col6, _col7, _col8, _col9, _col11, _col18, _col19
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: (((NVL(_col8,-1) <> NVL(_col18,-1)) or (NVL(_col9,-1) <> NVL(_col19,-1))) and _col8 is not null) (type: boolean)
+            predicate: (((NVL(_col8,-1) <> NVL(_col18,-1)) or (NVL(_col9,-1) <> NVL(_col19,-1))) and (CASE WHEN ((_col18 is not null and _col8 is null and (_col11 <= _col1))) THEN ('DEL') WHEN (((_col18 is null and _col8 is not null) or ((_col18 = _col8) and (_col19 <> _col9)))) THEN ('INS') ELSE ('NA') END <> 'RET') and _col8 is not null) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col0 (type: bigint), _col6 (type: string), _col7 (type: bigint), '2099-12-31' (type: string), _col8 (type: string), _col9 (type: string), CASE WHEN ((_col18 is not null and _col8 is null and (_col11 <= _col1))) THEN ('DEL') WHEN (((_col18 is null and _col8 is not null) or ((_col18 = _col8) and (_col19 <> _col9)))) THEN ('INS') ELSE ('NA') END (type: string)
               outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col6, _col7
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Filter Operator
-                predicate: (_col7 <> 'RET') (type: boolean)
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Fetch Operator