Posted to commits@hive.apache.org by jc...@apache.org on 2017/07/13 18:20:52 UTC

[01/10] hive git commit: HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Repository: hive
Updated Branches:
  refs/heads/master 65b351bc3 -> 4a567f865


http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
index 61dc19c..43ad576 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
@@ -33,17 +33,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '20') and (key > '15')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -51,17 +51,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key > '15') and (key < '20')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -72,10 +72,10 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -268,17 +268,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '20') and (key > '15')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -286,17 +286,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key > '15') and (key < '20')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -307,10 +307,10 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
index c47a424..0d8ea1d 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
@@ -33,17 +33,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '20') and (key > '15')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -51,17 +51,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key > '15') and (key < '20')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -72,10 +72,10 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -268,17 +268,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '20') and (key > '15')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -286,17 +286,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key > '15') and (key < '20')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -307,10 +307,10 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
index 711c4c4..7982939 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
@@ -39,17 +39,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -57,34 +57,34 @@ STAGE PLANS:
                   alias: c
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((sqrt(key) <> 13.0) and (key < '20') and (key > '15')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key > '15') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -97,14 +97,14 @@ STAGE PLANS:
                   1 _col0 (type: string)
                   2 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string), _col2 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                  Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -413,17 +413,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -431,34 +431,34 @@ STAGE PLANS:
                   alias: c
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((sqrt(key) <> 13.0) and (key < '20') and (key > '15')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key > '15') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -471,14 +471,14 @@ STAGE PLANS:
                   1 _col0 (type: string)
                   2 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string), _col2 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                  Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
index 907977b..167daf7 100644
--- a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
@@ -35,18 +35,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -109,18 +109,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -236,13 +236,13 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 0
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -341,18 +341,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -466,18 +466,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -542,17 +542,17 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col3, _col4
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -651,18 +651,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -725,18 +725,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -852,13 +852,13 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 0
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -957,18 +957,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -1082,18 +1082,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -1158,17 +1158,17 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col3, _col4
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
index 56d6bb9..7493ee4 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
@@ -603,34 +603,34 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -639,7 +639,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -755,34 +755,34 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -791,7 +791,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1512,34 +1512,34 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -1548,7 +1548,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/vector_date_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_date_1.q.out b/ql/src/test/results/clientpositive/vector_date_1.q.out
index 6762374..3fba352 100644
--- a/ql/src/test/results/clientpositive/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_date_1.q.out
@@ -591,7 +591,7 @@ STAGE PLANS:
             alias: vector_date_1
             Statistics: Num rows: 3 Data size: 224 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((dt1 = 2001-01-01) and (2001-01-01 = dt1) and (dt1 <> 1970-01-01) and (1970-01-01 <> dt1) and (dt1 > 1970-01-01) and (dt1 >= 1970-01-01) and (1970-01-01 < dt1) and (1970-01-01 <= dt1)) (type: boolean)
+              predicate: ((2001-01-01 = dt1) and (dt1 <> 1970-01-01) and (1970-01-01 <> dt1)) (type: boolean)
               Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 2001-01-01 (type: date), dt2 (type: date)
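The simplified vector_date_1 predicate above keeps the equality and inequality checks but drops the range comparisons, because they are implied once dt1 is pinned to 2001-01-01. An illustrative sketch in plain Java (not Hive or Calcite code; the class name DatePredicateDemo is made up for illustration) of why those comparisons are redundant:

import java.time.LocalDate;

public final class DatePredicateDemo {
  public static void main(String[] args) {
    // Value pinned by the equality predicate dt1 = DATE '2001-01-01'.
    LocalDate dt1 = LocalDate.parse("2001-01-01");
    LocalDate epoch = LocalDate.parse("1970-01-01");

    // Under that equality the range checks are tautologies, so a planner that
    // reasons about implication can drop them without changing the result.
    System.out.println(dt1.isAfter(epoch));    // dt1 >  1970-01-01 -> true
    System.out.println(!dt1.isBefore(epoch));  // dt1 >= 1970-01-01 -> true
  }
}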


[10/10] hive git commit: HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jc...@apache.org.
HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4a567f86
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4a567f86
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4a567f86

Branch: refs/heads/master
Commit: 4a567f86504b81e74cd0f7e5fb5375712ce4a62f
Parents: 65b351b
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue Jun 27 11:05:48 2017 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu Jul 13 20:20:35 2017 +0200

----------------------------------------------------------------------
 .../positive/accumulo_predicate_pushdown.q.out  |    3 +-
 .../results/positive/accumulo_queries.q.out     |    2 +-
 data/conf/hive-log4j2.properties                |    5 +-
 .../results/positive/hbase_ppd_key_range.q.out  |    3 +-
 .../test/results/positive/hbase_queries.q.out   |    2 +-
 .../test/results/positive/ppd_key_ranges.q.out  |   34 +-
 pom.xml                                         |    6 +-
 .../ql/optimizer/calcite/HiveCalciteUtil.java   |    6 +
 .../optimizer/calcite/HiveTypeSystemImpl.java   |   17 +-
 .../HiveDruidProjectFilterTransposeRule.java    |   48 -
 .../HiveMaterializedViewFilterScanRule.java     |   92 -
 .../rules/views/HiveMaterializedViewRule.java   |   38 +
 .../MaterializedViewSubstitutionVisitor.java    |  292 ---
 .../rules/views/SubstitutionVisitor.java        | 2458 ------------------
 .../calcite/translator/ASTBuilder.java          |   31 +-
 .../calcite/translator/ExprNodeConverter.java   |   22 +-
 .../calcite/translator/RexNodeConverter.java    |   62 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   10 +-
 .../clientpositive/materialized_view_create.q   |    6 +
 .../materialized_view_create_rewrite.q          |    2 +
 .../results/clientpositive/auto_join12.q.out    |   22 +-
 .../results/clientpositive/auto_join16.q.out    |   16 +-
 .../results/clientpositive/auto_join4.q.out     |    6 +-
 .../results/clientpositive/auto_join5.q.out     |    6 +-
 .../results/clientpositive/auto_join8.q.out     |    6 +-
 .../materialized_view_create_rewrite.q.out      |    8 +
 .../clientpositive/cbo_rp_auto_join1.q.out      |   18 +-
 .../clientpositive/cbo_rp_outer_join_ppr.q.out  |   44 +-
 .../results/clientpositive/druid_basic2.q.out   |  103 +-
 .../clientpositive/druid_intervals.q.out        |  130 +-
 .../clientpositive/druid_timeseries.q.out       |    6 +-
 .../results/clientpositive/druid_topn.q.out     |  268 +-
 .../results/clientpositive/filter_union.q.out   |  356 +--
 .../clientpositive/fouter_join_ppr.q.out        |   84 +-
 .../clientpositive/groupby_position.q.out       |   32 +-
 .../clientpositive/index_auto_mult_tables.q.out |   52 +-
 .../index_auto_mult_tables_compact.q.out        |   52 +-
 ql/src/test/results/clientpositive/join12.q.out |   30 +-
 ql/src/test/results/clientpositive/join16.q.out |   22 +-
 ql/src/test/results/clientpositive/join4.q.out  |    8 +-
 ql/src/test/results/clientpositive/join5.q.out  |    8 +-
 ql/src/test/results/clientpositive/join8.q.out  |    8 +-
 .../clientpositive/join_filters_overlap.q.out   |    4 +-
 .../llap/auto_smb_mapjoin_14.q.out              |   42 +-
 .../llap/auto_sortmerge_join_9.q.out            |   96 +-
 .../llap/bucket_map_join_tez2.q.out             |   34 +-
 .../clientpositive/llap/filter_union.q.out      |  236 +-
 .../clientpositive/llap/smb_mapjoin_14.q.out    |   54 +-
 .../clientpositive/llap/subquery_scalar.q.out   |  236 +-
 .../llap/vector_auto_smb_mapjoin_14.q.out       |   42 +-
 .../clientpositive/llap/vector_date_1.q.out     |    2 +-
 .../clientpositive/louter_join_ppr.q.out        |   84 +-
 .../test/results/clientpositive/masking_4.q.out |   16 +-
 .../materialized_view_create.q.out              |   40 +
 .../materialized_view_create_rewrite.q.out      |    8 +
 .../results/clientpositive/outer_join_ppr.q.out |   40 +-
 .../results/clientpositive/perf/query1.q.out    |   34 +-
 .../results/clientpositive/perf/query30.q.out   |   94 +-
 .../results/clientpositive/perf/query6.q.out    |  304 ++-
 .../results/clientpositive/perf/query81.q.out   |   96 +-
 .../test/results/clientpositive/ppd_gby.q.out   |   32 +-
 .../test/results/clientpositive/ppd_gby2.q.out  |   44 +-
 .../results/clientpositive/ppd_gby_join.q.out   |   60 +-
 .../test/results/clientpositive/ppd_join.q.out  |   48 +-
 .../clientpositive/ppd_outer_join1.q.out        |   40 +-
 .../clientpositive/ppd_outer_join2.q.out        |   40 +-
 .../clientpositive/ppd_outer_join3.q.out        |   40 +-
 .../clientpositive/ppd_outer_join4.q.out        |   60 +-
 .../test/results/clientpositive/ppd_union.q.out |   82 +-
 ql/src/test/results/clientpositive/ppd_vc.q.out |   18 +-
 .../clientpositive/rcfile_null_value.q.out      |    8 +-
 .../clientpositive/router_join_ppr.q.out        |   84 +-
 .../clientpositive/spark/auto_join12.q.out      |   22 +-
 .../clientpositive/spark/auto_join16.q.out      |   16 +-
 .../clientpositive/spark/auto_join4.q.out       |    6 +-
 .../clientpositive/spark/auto_join5.q.out       |    6 +-
 .../clientpositive/spark/auto_join8.q.out       |    6 +-
 .../spark/auto_smb_mapjoin_14.q.out             |   24 +-
 .../spark/auto_sortmerge_join_9.q.out           |   66 +-
 .../spark/bucket_map_join_tez2.q.out            |   28 +-
 .../clientpositive/spark/groupby_position.q.out |   38 +-
 .../results/clientpositive/spark/join12.q.out   |   30 +-
 .../results/clientpositive/spark/join16.q.out   |   22 +-
 .../results/clientpositive/spark/join4.q.out    |    8 +-
 .../results/clientpositive/spark/join5.q.out    |    8 +-
 .../results/clientpositive/spark/join8.q.out    |    8 +-
 .../spark/join_filters_overlap.q.out            |    4 +-
 .../clientpositive/spark/louter_join_ppr.q.out  |   84 +-
 .../clientpositive/spark/outer_join_ppr.q.out   |   40 +-
 .../clientpositive/spark/ppd_gby_join.q.out     |   60 +-
 .../results/clientpositive/spark/ppd_join.q.out |   48 +-
 .../clientpositive/spark/ppd_outer_join1.q.out  |   40 +-
 .../clientpositive/spark/ppd_outer_join2.q.out  |   40 +-
 .../clientpositive/spark/ppd_outer_join3.q.out  |   40 +-
 .../clientpositive/spark/ppd_outer_join4.q.out  |   60 +-
 .../clientpositive/spark/router_join_ppr.q.out  |   84 +-
 .../clientpositive/spark/smb_mapjoin_14.q.out   |   54 +-
 .../results/clientpositive/vector_date_1.q.out  |    2 +-
 98 files changed, 1810 insertions(+), 5476 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out b/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
index 93b10a8..dffd5a2 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out
@@ -312,10 +312,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: accumulo_pushdown
-            filterExpr: ((key < '80') and (key > '90')) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (value like '%90%') (type: boolean)
+              predicate: false (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
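The new "predicate: false" in the plan above reflects that the two pushed-down range conditions contradict each other, so the whole filter collapses to a constant. An illustrative sketch in plain Java (the class name RangeContradictionDemo is hypothetical, not Hive code) of why (key < '80') AND (key > '90') can never hold for string keys:

public final class RangeContradictionDemo {
  public static void main(String[] args) {
    String upperBound = "80"; // from key < '80'
    String lowerBound = "90"; // from key > '90'

    // A row would need key < "80" and key > "90" at the same time. With the
    // lexicographic ordering used for string keys that requires "90" < "80",
    // which is false, so the conjunction is unsatisfiable and the planner can
    // replace the whole filter with a constant false predicate.
    boolean satisfiable = lowerBound.compareTo(upperBound) < 0;
    System.out.println("satisfiable: " + satisfiable); // satisfiable: false
  }
}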

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
index de82857..b3adf4e 100644
--- a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
+++ b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out
@@ -289,7 +289,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: accumulo_table_1
-            filterExpr: ((100 < key) and (key < 120)) (type: boolean)
+            filterExpr: (key < 120) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: key (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/data/conf/hive-log4j2.properties
----------------------------------------------------------------------
diff --git a/data/conf/hive-log4j2.properties b/data/conf/hive-log4j2.properties
index 1124d57..acaf2b4 100644
--- a/data/conf/hive-log4j2.properties
+++ b/data/conf/hive-log4j2.properties
@@ -50,7 +50,7 @@ appender.DRFA.strategy.type = DefaultRolloverStrategy
 appender.DRFA.strategy.max = 30
 
 # list of all loggers
-loggers = HadoopIPC, HadoopSecurity, Hdfs, HdfsServer, HadoopMetrics2, Mortbay, Yarn, YarnServer, Tez, HadoopConf, Zookeeper, ServerCnxn, NIOServerCnxn, ClientCnxn, ClientCnxnSocket, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, Operator, Serde2Lazy, ObjectStore
+loggers = HadoopIPC, HadoopSecurity, Hdfs, HdfsServer, HadoopMetrics2, Mortbay, Yarn, YarnServer, Tez, HadoopConf, Zookeeper, ServerCnxn, NIOServerCnxn, ClientCnxn, ClientCnxnSocket, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, Operator, Serde2Lazy, ObjectStore, CalcitePlanner
 
 logger.HadoopIPC.name = org.apache.hadoop.ipc
 logger.HadoopIPC.level = WARN
@@ -118,6 +118,9 @@ logger.Serde2Lazy.level = INFO
 logger.ObjectStore.name = org.apache.hadoop.hive.metastore.ObjectStore
 logger.ObjectStore.level = INFO
 
+logger.CalcitePlanner.name = org.apache.calcite.plan.RelOptPlanner
+logger.CalcitePlanner.level = INFO
+
 # root logger
 rootLogger.level = ${sys:hive.log.level}
 rootLogger.appenderRefs = root, console

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out b/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
index 0d72bdc..d3c4a99 100644
--- a/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
@@ -312,10 +312,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hbase_pushdown
-            filterExpr: ((key < '80') and (key > '90')) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (value like '%90%') (type: boolean)
+              predicate: false (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/hbase-handler/src/test/results/positive/hbase_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index 276b6b8..43a5a25 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -291,7 +291,7 @@ STAGE PLANS:
             alias: hbase_table_1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((100 < key) and (key < 120)) (type: boolean)
+              predicate: (key < 120) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out b/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
index 1897777..633e435 100644
--- a/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
+++ b/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
@@ -148,36 +148,22 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from hbase_ppd_keyrange where key >= 9 and key < 17 and key = 11
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: hbase_ppd_keyrange
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: (((key >= 9) and (key < 17)) and (key = 11)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: 11 (type: int), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: hbase_ppd_keyrange
+          filterExpr: (key = 11) (type: boolean)
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: 11 (type: int), value (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: select * from hbase_ppd_keyrange where key >=9  and key < 17 and key = 11
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 1b89632..32f5fd1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -115,10 +115,10 @@
     <antlr.version>3.5.2</antlr.version>
     <apache-directory-server.version>1.5.6</apache-directory-server.version>
     <apache-directory-clientapi.version>0.1</apache-directory-clientapi.version>
-    <avatica.version>1.9.0</avatica.version>
+    <avatica.version>1.10.0</avatica.version>
     <avro.version>1.7.7</avro.version>
     <bonecp.version>0.8.0.RELEASE</bonecp.version>
-    <calcite.version>1.12.0</calcite.version>
+    <calcite.version>1.13.0</calcite.version>
     <datanucleus-api-jdo.version>4.2.4</datanucleus-api-jdo.version>
     <datanucleus-core.version>4.1.17</datanucleus-core.version>
     <datanucleus-rdbms.version>4.1.19</datanucleus-rdbms.version>
@@ -131,7 +131,7 @@
     <commons-httpclient.version>3.0.1</commons-httpclient.version>
     <commons-io.version>2.4</commons-io.version>
     <commons-lang.version>2.6</commons-lang.version>
-    <commons-lang3.version>3.1</commons-lang3.version>
+    <commons-lang3.version>3.2</commons-lang3.version>
     <commons-pool.version>1.5.4</commons-pool.version>
     <commons-dbcp.version>1.4</commons-dbcp.version>
     <derby.version>10.10.2.0</derby.version>

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
index e339d0a..5f34e8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
@@ -89,6 +89,7 @@ import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
+import org.apache.calcite.rex.RexTableInputRef;
 
 /**
  * Generic utility functions needed for Calcite based Hive CBO.
@@ -1080,6 +1081,11 @@ public class HiveCalciteUtil {
     public Boolean visitPatternFieldRef(RexPatternFieldRef fieldRef) {
       return false;
     }
+
+    @Override
+    public Boolean visitTableInputRef(RexTableInputRef fieldRef) {
+      return false;
+    }
   }
 
   public static Set<Integer> getInputRefs(RexNode expr) {
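The visitTableInputRef override added above is needed because Calcite 1.13 introduces RexTableInputRef as a row-expression node kind that RexVisitor implementations have to handle. A minimal sketch of a custom visitor, under the assumption that it extends Calcite's RexVisitorImpl (the class InputRefCounter is hypothetical and not Hive code):

import org.apache.calcite.rex.RexInputRef;
import org.apache.calcite.rex.RexTableInputRef;
import org.apache.calcite.rex.RexVisitorImpl;

/** Counts plain input references in a row expression. */
class InputRefCounter extends RexVisitorImpl<Void> {
  int count = 0;

  InputRefCounter() {
    super(true); // visit nested operands as well
  }

  @Override
  public Void visitInputRef(RexInputRef inputRef) {
    count++;
    return null;
  }

  @Override
  public Void visitTableInputRef(RexTableInputRef tableRef) {
    // Table-level references, used by the new materialized view rewriting,
    // are deliberately not counted as plain input refs.
    return null;
  }
}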

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java
index 279d101..dad783d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveTypeSystemImpl.java
@@ -26,8 +26,14 @@ public class HiveTypeSystemImpl extends RelDataTypeSystemImpl {
   private static final int MAX_DECIMAL_PRECISION     = 38;
   private static final int MAX_DECIMAL_SCALE         = 38;
   private static final int DEFAULT_DECIMAL_PRECISION = 10;
-  private static final int MAX_VARCHAR_PRECISION     = 65535;
-  private static final int MAX_CHAR_PRECISION        = 255;
+  // STRING type in Hive is represented as VARCHAR with precision Integer.MAX_VALUE.
+  // Ordinarily the max VARCHAR precision would be 65535, but that value is not used
+  // for validation here; it is only consulted internally by the optimizer to learn the
+  // max precision the system supports. Thus, no VARCHAR precision should fall between
+  // 65535 and Integer.MAX_VALUE; the check on VARCHAR precision is done in Hive itself.
+  private static final int MAX_CHAR_PRECISION        = Integer.MAX_VALUE;
+  private static final int DEFAULT_VARCHAR_PRECISION = 65535;
+  private static final int DEFAULT_CHAR_PRECISION    = 255;
   private static final int MAX_BINARY_PRECISION      = Integer.MAX_VALUE;
   private static final int MAX_TIMESTAMP_PRECISION   = 9;
 
@@ -60,13 +66,15 @@ public class HiveTypeSystemImpl extends RelDataTypeSystemImpl {
     switch (typeName) {
     // Hive will always require user to specify exact sizes for char, varchar;
     // Binary doesn't need any sizes; Decimal has the default of 10.
-    case CHAR:
-    case VARCHAR:
     case BINARY:
     case VARBINARY:
     case TIME:
     case TIMESTAMP:
       return getMaxPrecision(typeName);
+    case CHAR:
+      return DEFAULT_CHAR_PRECISION;
+    case VARCHAR:
+      return DEFAULT_VARCHAR_PRECISION;
     case DECIMAL:
       return DEFAULT_DECIMAL_PRECISION;
     case INTERVAL_YEAR:
@@ -94,7 +102,6 @@ public class HiveTypeSystemImpl extends RelDataTypeSystemImpl {
     case DECIMAL:
       return getMaxNumericPrecision();
     case VARCHAR:
-      return MAX_VARCHAR_PRECISION;
     case CHAR:
       return MAX_CHAR_PRECISION;
     case VARBINARY:
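The effect of the new constants can be observed directly through the RelDataTypeSystem methods they feed. A minimal usage sketch (the demo class name is hypothetical, the default constructor of HiveTypeSystemImpl is assumed, and the printed values follow from the constants above):

import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl;

public class HiveTypeSystemDemo {
  public static void main(String[] args) {
    HiveTypeSystemImpl typeSystem = new HiveTypeSystemImpl();
    // Max precision is now Integer.MAX_VALUE for CHAR/VARCHAR, which lets
    // Hive STRING be modeled as VARCHAR(Integer.MAX_VALUE).
    System.out.println(typeSystem.getMaxPrecision(SqlTypeName.VARCHAR));
    // Default precisions keep the user-facing sizes.
    System.out.println(typeSystem.getDefaultPrecision(SqlTypeName.VARCHAR)); // 65535
    System.out.println(typeSystem.getDefaultPrecision(SqlTypeName.CHAR));    // 255
  }
}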

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidProjectFilterTransposeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidProjectFilterTransposeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidProjectFilterTransposeRule.java
deleted file mode 100644
index dd39056..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveDruidProjectFilterTransposeRule.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
-
-import org.apache.calcite.adapter.druid.DruidQuery;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.rules.ProjectFilterTransposeRule;
-import org.apache.calcite.rel.rules.PushProjector;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-
-/**
- * Rule to push a {@link org.apache.calcite.rel.core.Project}
- * past a {@link org.apache.calcite.rel.core.Filter}
- * when {@code Filter} is on top of a {@link DruidQuery}.
- * TODO: Replace this class with calcite DruidRules.DruidFilterProjectTransposeRule
- * once we upgrade to calcite 1.13
- */
-public class HiveDruidProjectFilterTransposeRule
-    extends ProjectFilterTransposeRule {
-
-  public static final HiveDruidProjectFilterTransposeRule INSTANCE =
-      new HiveDruidProjectFilterTransposeRule();
-
-  private HiveDruidProjectFilterTransposeRule() {
-    super(
-        operand(Project.class,
-            operand(Filter.class, operand(DruidQuery.class, none()))),
-        PushProjector.ExprCondition.FALSE,
-        HiveRelFactories.HIVE_BUILDER);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
deleted file mode 100644
index 81de33f..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.calcite.plan.RelOptMaterialization;
-import org.apache.calcite.plan.RelOptMaterializations;
-import org.apache.calcite.plan.RelOptPlanner;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.plan.volcano.VolcanoPlanner;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.tools.RelBuilderFactory;
-import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
-
-import com.google.common.collect.ImmutableList;
-
-/**
- * Planner rule that replaces (if possible)
- * a {@link org.apache.calcite.rel.core.Project}
- * on a {@link org.apache.calcite.rel.core.Filter}
- * on a {@link org.apache.calcite.rel.core.TableScan}
- * to use a Materialized View.
- */
-public class HiveMaterializedViewFilterScanRule extends RelOptRule {
-
-  public static final HiveMaterializedViewFilterScanRule INSTANCE =
-      new HiveMaterializedViewFilterScanRule(HiveRelFactories.HIVE_BUILDER);
-
-
-  //~ Constructors -----------------------------------------------------------
-
-  /** Creates a HiveMaterializedViewFilterScanRule. */
-  protected HiveMaterializedViewFilterScanRule(RelBuilderFactory relBuilderFactory) {
-    super(operand(Project.class, operand(Filter.class, operand(TableScan.class, null, none()))),
-            relBuilderFactory, "MaterializedViewFilterScanRule");
-  }
-
-  //~ Methods ----------------------------------------------------------------
-
-  public void onMatch(RelOptRuleCall call) {
-    final Project project = call.rel(0);
-    final Filter filter = call.rel(1);
-    final TableScan scan = call.rel(2);
-    apply(call, project, filter, scan);
-  }
-
-  protected void apply(RelOptRuleCall call, Project project, Filter filter, TableScan scan) {
-    RelOptPlanner planner = call.getPlanner();
-    List<RelOptMaterialization> materializations =
-        (planner instanceof VolcanoPlanner)
-            ? ((VolcanoPlanner) planner).getMaterializations()
-            : ImmutableList.<RelOptMaterialization>of();
-    if (!materializations.isEmpty()) {
-      RelNode root = project.copy(project.getTraitSet(), Collections.singletonList(
-          filter.copy(filter.getTraitSet(), Collections.singletonList(
-              (RelNode) scan))));
-      // Costing is done in transformTo(), so we call it repeatedly with all applicable
-      // materialized views and cheapest one will be picked
-      List<RelOptMaterialization> applicableMaterializations =
-          RelOptMaterializations.getApplicableMaterializations(root, materializations);
-      for (RelOptMaterialization materialization : applicableMaterializations) {
-        List<RelNode> subs = new MaterializedViewSubstitutionVisitor(
-            materialization.queryRel, root, relBuilderFactory).go(materialization.tableRel);
-        for (RelNode s : subs) {
-          call.transformTo(s);
-        }
-      }
-    }
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewRule.java
new file mode 100644
index 0000000..2fa1138
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewRule.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views;
+
+import org.apache.calcite.rel.rules.AbstractMaterializedViewRule.MaterializedViewOnlyFilterRule;
+import org.apache.calcite.rel.rules.AbstractMaterializedViewRule.MaterializedViewProjectFilterRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+
+/**
+ * Planner rules that rewrite (if possible) a {@link org.apache.calcite.rel.core.Project}
+ * on a {@link org.apache.calcite.rel.core.Filter} on a {@link org.apache.calcite.rel.core.TableScan},
+ * or a {@link org.apache.calcite.rel.core.Filter} on a {@link org.apache.calcite.rel.core.TableScan},
+ * to use a Materialized View.
+ */
+public class HiveMaterializedViewRule {
+
+  public static final MaterializedViewProjectFilterRule INSTANCE_PROJECT_FILTER =
+      new MaterializedViewProjectFilterRule(HiveRelFactories.HIVE_BUILDER, false);
+
+  public static final MaterializedViewOnlyFilterRule INSTANCE_FILTER =
+      new MaterializedViewOnlyFilterRule(HiveRelFactories.HIVE_BUILDER, false);
+
+}
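The replacement class above only exposes Calcite 1.13's materialized view rewriting rule instances; using them means adding the rules to a planner that also has the materializations registered. A minimal wiring sketch (the helper class and method are hypothetical, not copied from CalcitePlanner):

import org.apache.calcite.plan.RelOptPlanner;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewRule;

final class MaterializedViewRewriteSetup {
  private MaterializedViewRewriteSetup() {
  }

  /** Registers the Calcite 1.13 materialized view rewriting rules on a planner. */
  static void registerRules(RelOptPlanner planner) {
    planner.addRule(HiveMaterializedViewRule.INSTANCE_PROJECT_FILTER);
    planner.addRule(HiveMaterializedViewRule.INSTANCE_FILTER);
  }
}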

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java
deleted file mode 100644
index e32f1a6..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java
+++ /dev/null
@@ -1,292 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views;
-
-import java.util.List;
-
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexShuttle;
-import org.apache.calcite.tools.RelBuilderFactory;
-
-import com.google.common.collect.ImmutableList;
-
-/**
- * Extension to {@link SubstitutionVisitor}.
- *
- * TODO: Remove when we upgrade to Calcite version using builders.
- */
-public class MaterializedViewSubstitutionVisitor extends SubstitutionVisitor {
-  private static final ImmutableList<UnifyRule> EXTENDED_RULES =
-      ImmutableList.<UnifyRule>builder()
-          .addAll(DEFAULT_RULES)
-          .add(ProjectToProjectUnifyRule1.INSTANCE)
-          .add(FilterToFilterUnifyRule1.INSTANCE)
-          .add(FilterToProjectUnifyRule1.INSTANCE)
-          .build();
-
-  public MaterializedViewSubstitutionVisitor(RelNode target_, RelNode query_) {
-    super(target_, query_, EXTENDED_RULES);
-  }
-
-  public MaterializedViewSubstitutionVisitor(RelNode target_, RelNode query_,
-      RelBuilderFactory relBuilderFactory) {
-    super(target_, query_, EXTENDED_RULES, relBuilderFactory);
-  }
-
-  public List<RelNode> go(RelNode replacement_) {
-    return super.go(replacement_);
-  }
-
-  /**
-   * Implementation of {@link SubstitutionVisitor.UnifyRule} that matches a
-   * {@link SubstitutionVisitor.MutableProject} to a
-   * {@link SubstitutionVisitor.MutableProject} where the condition of the target
-   * relation is weaker.
-   *
-   * <p>Example: target has a weaker condition and contains all columns selected
-   * by query</p>
-   * <ul>
-   * <li>query:   Project(projects: [$2, $0])
-   *                Filter(condition: &gt;($1, 20))
-   *                  Scan(table: [hr, emps])</li>
-   * <li>target:  Project(projects: [$0, $1, $2])
-   *                Filter(condition: &gt;($1, 10))
-   *                  Scan(table: [hr, emps])</li>
-   * </ul>
-   */
-  private static class ProjectToProjectUnifyRule1 extends AbstractUnifyRule {
-    public static final ProjectToProjectUnifyRule1 INSTANCE =
-        new ProjectToProjectUnifyRule1();
-
-    private ProjectToProjectUnifyRule1() {
-      super(operand(MutableProject.class, query(0)),
-          operand(MutableProject.class, target(0)), 1);
-    }
-
-    @Override protected UnifyResult apply(UnifyRuleCall call) {
-      final MutableProject query = (MutableProject) call.query;
-
-      final List<RelDataTypeField> oldFieldList =
-          query.getInput().getRowType().getFieldList();
-      final List<RelDataTypeField> newFieldList =
-          call.target.getRowType().getFieldList();
-      List<RexNode> newProjects;
-      try {
-        newProjects = transformRex(query.getProjects(), oldFieldList, newFieldList);
-      } catch (MatchFailed e) {
-        return null;
-      }
-
-      final MutableProject newProject =
-          MutableProject.of(
-              query.getRowType(), call.target, newProjects);
-
-      final MutableRel newProject2 = MutableRels.strip(newProject);
-      return call.result(newProject2);
-    }
-
-    @Override protected UnifyRuleCall match(SubstitutionVisitor visitor,
-        MutableRel query, MutableRel target) {
-      assert query instanceof MutableProject && target instanceof MutableProject;
-
-      if (queryOperand.matches(visitor, query)) {
-        if (targetOperand.matches(visitor, target)) {
-          return null;
-        } else if (targetOperand.isWeaker(visitor, target)) {
-
-          final MutableProject queryProject = (MutableProject) query;
-          if (queryProject.getInput() instanceof MutableFilter) {
-            final MutableFilter innerFilter =
-                (MutableFilter) queryProject.getInput();
-            RexNode newCondition;
-            try {
-              newCondition = transformRex(innerFilter.getCondition(),
-                  innerFilter.getInput().getRowType().getFieldList(),
-                  target.getRowType().getFieldList());
-            } catch (MatchFailed e) {
-              return null;
-            }
-            final MutableFilter newFilter = MutableFilter.of(target,
-                newCondition);
-
-            return visitor.new UnifyRuleCall(this, query, newFilter,
-                copy(visitor.slots, slotCount));
-          }
-        }
-      }
-      return null;
-    }
-  }
-
-  /**
-   * Implementation of {@link SubstitutionVisitor.UnifyRule} that matches a
-   * {@link SubstitutionVisitor.MutableFilter} to a
-   * {@link SubstitutionVisitor.MutableFilter} where the condition of the target
-   * relation is weaker.
-   *
-   * <p>Example: target has a weaker condition</p>
-   * <ul>
-   * <li>query:   Filter(condition: &gt;($1, 20))
-   *                Scan(table: [hr, emps])</li>
-   * <li>target:  Filter(condition: &gt;($1, 10))
-   *                Scan(table: [hr, emps])</li>
-   * </ul>
-   */
-  private static class FilterToFilterUnifyRule1 extends AbstractUnifyRule {
-    public static final FilterToFilterUnifyRule1 INSTANCE =
-        new FilterToFilterUnifyRule1();
-
-    private FilterToFilterUnifyRule1() {
-      super(operand(MutableFilter.class, query(0)),
-          operand(MutableFilter.class, target(0)), 1);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      final MutableFilter query = (MutableFilter) call.query;
-      final MutableFilter target = (MutableFilter) call.target;
-      final MutableFilter newFilter = MutableFilter.of(target, query.getCondition());
-      return call.result(newFilter);
-    }
-
-    @Override protected UnifyRuleCall match(SubstitutionVisitor visitor,
-        MutableRel query, MutableRel target) {
-      if (queryOperand.matches(visitor, query)) {
-        if (targetOperand.matches(visitor, target)) {
-          if (visitor.isWeaker(query, target)) {
-            return visitor.new UnifyRuleCall(this, query, target,
-                copy(visitor.slots, slotCount));
-          }
-        }
-      }
-      return null;
-    }
-  }
-
-  /**
-   * Implementation of {@link SubstitutionVisitor.UnifyRule} that matches a
-   * {@link SubstitutionVisitor.MutableFilter} to a
-   * {@link SubstitutionVisitor.MutableProject} on top of a
-   * {@link SubstitutionVisitor.MutableFilter} where the condition of the target
-   * relation is weaker.
-   *
-   * <p>Example: target has a weaker condition and is a permutation projection of
-   * its child relation</p>
-   * <ul>
-   * <li>query:   Filter(condition: &gt;($1, 20))
-   *                Scan(table: [hr, emps])</li>
-   * <li>target:  Project(projects: [$1, $0, $2, $3, $4])
-   *                Filter(condition: &gt;($1, 10))
-   *                  Scan(table: [hr, emps])</li>
-   * </ul>
-   */
-  private static class FilterToProjectUnifyRule1 extends AbstractUnifyRule {
-    public static final FilterToProjectUnifyRule1 INSTANCE =
-        new FilterToProjectUnifyRule1();
-
-    private FilterToProjectUnifyRule1() {
-      super(
-          operand(MutableFilter.class, query(0)),
-          operand(MutableProject.class,
-              operand(MutableFilter.class, target(0))), 1);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      final MutableRel query = call.query;
-
-      final List<RelDataTypeField> oldFieldList =
-          query.getRowType().getFieldList();
-      final List<RelDataTypeField> newFieldList =
-          call.target.getRowType().getFieldList();
-      List<RexNode> newProjects;
-      try {
-        newProjects = transformRex(
-            (List<RexNode>) call.getCluster().getRexBuilder().identityProjects(
-                query.getRowType()),
-            oldFieldList, newFieldList);
-      } catch (MatchFailed e) {
-        return null;
-      }
-
-      final MutableProject newProject =
-          MutableProject.of(
-              query.getRowType(), call.target, newProjects);
-
-      final MutableRel newProject2 = MutableRels.strip(newProject);
-      return call.result(newProject2);
-    }
-
-    @Override protected UnifyRuleCall match(SubstitutionVisitor visitor,
-        MutableRel query, MutableRel target) {
-      assert query instanceof MutableFilter && target instanceof MutableProject;
-
-      if (queryOperand.matches(visitor, query)) {
-        if (targetOperand.matches(visitor, target)) {
-          if (visitor.isWeaker(query, ((MutableProject) target).getInput())) {
-            final MutableFilter filter = (MutableFilter) query;
-            RexNode newCondition;
-            try {
-              newCondition = transformRex(filter.getCondition(),
-                  filter.getInput().getRowType().getFieldList(),
-                  target.getRowType().getFieldList());
-            } catch (MatchFailed e) {
-              return null;
-            }
-            final MutableFilter newFilter = MutableFilter.of(target,
-                newCondition);
-            return visitor.new UnifyRuleCall(this, query, newFilter,
-                copy(visitor.slots, slotCount));
-          }
-        }
-      }
-      return null;
-    }
-  }
-
-  private static RexNode transformRex(RexNode node,
-      final List<RelDataTypeField> oldFields,
-      final List<RelDataTypeField> newFields) {
-    List<RexNode> nodes =
-        transformRex(ImmutableList.of(node), oldFields, newFields);
-    return nodes.get(0);
-  }
-
-  private static List<RexNode> transformRex(
-      List<RexNode> nodes,
-      final List<RelDataTypeField> oldFields,
-      final List<RelDataTypeField> newFields) {
-    RexShuttle shuttle = new RexShuttle() {
-      @Override public RexNode visitInputRef(RexInputRef ref) {
-        RelDataTypeField f = oldFields.get(ref.getIndex());
-        for (int index = 0; index < newFields.size(); index++) {
-          RelDataTypeField newf = newFields.get(index);
-          if (f.getKey().equals(newf.getKey())
-              && f.getValue() == newf.getValue()) {
-            return new RexInputRef(index, f.getValue());
-          }
-        }
-        throw MatchFailed.INSTANCE;
-      }
-    };
-    return shuttle.apply(nodes);
-  }
-}
-
-// End MaterializedViewSubstitutionVisitor.java


[06/10] hive git commit: HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/join12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join12.q.out b/ql/src/test/results/clientpositive/join12.q.out
index b3d3997..f79e272 100644
--- a/ql/src/test/results/clientpositive/join12.q.out
+++ b/ql/src/test/results/clientpositive/join12.q.out
@@ -32,47 +32,47 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -84,14 +84,14 @@ STAGE PLANS:
             1 _col0 (type: string)
             2 _col0 (type: string)
           outputColumnNames: _col0, _col3
-          Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col3 (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join16.q.out b/ql/src/test/results/clientpositive/join16.q.out
index 61d072a..39c6408 100644
--- a/ql/src/test/results/clientpositive/join16.q.out
+++ b/ql/src/test/results/clientpositive/join16.q.out
@@ -14,32 +14,32 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: tab
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -48,14 +48,14 @@ STAGE PLANS:
             0 _col0 (type: string), _col1 (type: string)
             1 _col0 (type: string), _col1 (type: string)
           outputColumnNames: _col0, _col3
-          Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col3 (type: string)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join4.q.out b/ql/src/test/results/clientpositive/join4.q.out
index 0bd1edd..10b7fbd 100644
--- a/ql/src/test/results/clientpositive/join4.q.out
+++ b/ql/src/test/results/clientpositive/join4.q.out
@@ -65,17 +65,17 @@ STAGE PLANS:
             alias: src2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join5.q.out b/ql/src/test/results/clientpositive/join5.q.out
index d981c74..105a731 100644
--- a/ql/src/test/results/clientpositive/join5.q.out
+++ b/ql/src/test/results/clientpositive/join5.q.out
@@ -49,17 +49,17 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: src2

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join8.q.out b/ql/src/test/results/clientpositive/join8.q.out
index 124b47e..185b542 100644
--- a/ql/src/test/results/clientpositive/join8.q.out
+++ b/ql/src/test/results/clientpositive/join8.q.out
@@ -65,17 +65,17 @@ STAGE PLANS:
             alias: src2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/join_filters_overlap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/join_filters_overlap.q.out
index 8ef37a4..db0b26d 100644
--- a/ql/src/test/results/clientpositive/join_filters_overlap.q.out
+++ b/ql/src/test/results/clientpositive/join_filters_overlap.q.out
@@ -521,10 +521,10 @@ STAGE PLANS:
                Right Outer Join 0 to 1
                Left Outer Join 1 to 2
           filter mappings:
-            1 [0, 2, 2, 2]
+            1 [0, 1, 2, 1]
           filter predicates:
             0 
-            1 {(VALUE._col0 = 50)} {(VALUE._col0 > 10)} {(VALUE._col0 = 60)} {(VALUE._col0 > 20)}
+            1 {(VALUE._col0 = 50)} {(VALUE._col0 = 60)}
             2 
           keys:
             0 _col0 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
index 841ef14..c563976 100644
--- a/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out
@@ -666,30 +666,30 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Merge Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -807,30 +807,30 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Merge Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -1411,30 +1411,30 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Merge Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out
index b53b2ed..278c55b 100644
--- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out
+++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out
@@ -798,12 +798,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -812,7 +812,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -830,17 +830,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -951,12 +951,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -965,7 +965,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -983,17 +983,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -1721,12 +1721,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -1735,7 +1735,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -1753,17 +1753,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -2579,12 +2579,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -2593,7 +2593,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -2611,17 +2611,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -2732,12 +2732,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -2746,7 +2746,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -2764,17 +2764,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -3383,12 +3383,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -3397,7 +3397,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -3415,17 +3415,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
index 1232767..bac4ae0 100644
--- a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
@@ -343,12 +343,12 @@ STAGE PLANS:
                   alias: tab_part
                   Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 1) and (key > 2)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -358,10 +358,10 @@ STAGE PLANS:
                         outputColumnNames: _col0, _col1
                         input vertices:
                           1 Map 2
-                        Statistics: Num rows: 60 Data size: 1126 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 182 Data size: 3400 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          Statistics: Num rows: 60 Data size: 1126 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 182 Data size: 3400 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -374,17 +374,17 @@ STAGE PLANS:
                   alias: tab_part
                   Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 2) and (key > 1)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
 
@@ -449,17 +449,17 @@ STAGE PLANS:
                   alias: tab_part
                   Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 2) and (key > 1)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
 
@@ -493,17 +493,17 @@ STAGE PLANS:
                   alias: tab_part
                   Statistics: Num rows: 500 Data size: 9312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 1) and (key > 2)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 55 Data size: 1024 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 166 Data size: 3091 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Map 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/llap/filter_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/filter_union.q.out b/ql/src/test/results/clientpositive/llap/filter_union.q.out
index 17f10df..c4af317 100644
--- a/ql/src/test/results/clientpositive/llap/filter_union.q.out
+++ b/ql/src/test/results/clientpositive/llap/filter_union.q.out
@@ -35,8 +35,6 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE), Union 3 (CONTAINS)
         Reducer 5 <- Map 4 (SIMPLE_EDGE), Union 3 (CONTAINS)
-        Reducer 7 <- Map 6 (SIMPLE_EDGE), Union 3 (CONTAINS)
-        Reducer 9 <- Map 8 (SIMPLE_EDGE), Union 3 (CONTAINS)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -45,160 +43,6 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
                   GatherStats: false
-                  Filter Operator
-                    isSamplingPred: false
-                    predicate: false (type: boolean)
-                    Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count(key)
-                      keys: key (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        null sort order: a
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
-                        tag: -1
-                        value expressions: _col1 (type: bigint)
-                        auto parallelism: true
-            Execution mode: llap
-            LLAP IO: no inputs
-            Path -> Alias:
-              nullscan://null/default.src/part_ [src]
-            Path -> Partition:
-              nullscan://null/default.src/part_ 
-                Partition
-                  input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                    bucket_count -1
-                    column.name.delimiter ,
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.NullStructSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                      bucket_count -1
-                      column.name.delimiter ,
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              nullscan://null/default.src/part_ [src]
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                  GatherStats: false
-                  Filter Operator
-                    isSamplingPred: false
-                    predicate: false (type: boolean)
-                    Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count(key)
-                      keys: key (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        null sort order: a
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
-                        tag: -1
-                        value expressions: _col1 (type: bigint)
-                        auto parallelism: true
-            Execution mode: llap
-            LLAP IO: no inputs
-            Path -> Alias:
-              nullscan://null/default.src/part_ [src]
-            Path -> Partition:
-              nullscan://null/default.src/part_ 
-                Partition
-                  input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                    bucket_count -1
-                    column.name.delimiter ,
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.NullStructSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                      bucket_count -1
-                      column.name.delimiter ,
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              nullscan://null/default.src/part_ [src]
-        Map 6 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
-                  GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
@@ -271,7 +115,7 @@ STAGE PLANS:
                   name: default.src
             Truncated Path -> Alias:
               /src [src]
-        Map 8 
+        Map 4 
             Map Operator Tree:
                 TableScan
                   alias: src
@@ -358,78 +202,6 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: bigint), 1 (type: int)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 99 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 0
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
-                    Statistics: Num rows: 412 Data size: 40788 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        properties:
-                          columns _col0,_col1,_col2
-                          columns.types string:bigint:int
-                          escape.delim \
-                          hive.serialization.extend.additional.nesting.levels true
-                          serialization.escape.crlf true
-                          serialization.format 1
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    TotalFiles: 1
-                    GatherStats: false
-                    MultiFileSpray: false
-        Reducer 5 
-            Execution mode: llap
-            Needs Tagging: false
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 95 Basic stats: COMPLETE Column stats: COMPLETE
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: bigint), 2 (type: int)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 1 Data size: 99 Basic stats: COMPLETE Column stats: COMPLETE
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 0
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
-                    Statistics: Num rows: 412 Data size: 40788 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        properties:
-                          columns _col0,_col1,_col2
-                          columns.types string:bigint:int
-                          escape.delim \
-                          hive.serialization.extend.additional.nesting.levels true
-                          serialization.escape.crlf true
-                          serialization.format 1
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    TotalFiles: 1
-                    GatherStats: false
-                    MultiFileSpray: false
-        Reducer 7 
-            Execution mode: llap
-            Needs Tagging: false
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 205 Data size: 19475 Basic stats: COMPLETE Column stats: COMPLETE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), 3 (type: int)
@@ -440,7 +212,7 @@ STAGE PLANS:
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 412 Data size: 40788 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 410 Data size: 40590 Basic stats: COMPLETE Column stats: COMPLETE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -457,7 +229,7 @@ STAGE PLANS:
                     TotalFiles: 1
                     GatherStats: false
                     MultiFileSpray: false
-        Reducer 9 
+        Reducer 5 
             Execution mode: llap
             Needs Tagging: false
             Reduce Operator Tree:
@@ -476,7 +248,7 @@ STAGE PLANS:
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 412 Data size: 40788 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 410 Data size: 40590 Basic stats: COMPLETE Column stats: COMPLETE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out
index df32ad4..63984cc 100644
--- a/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/smb_mapjoin_14.q.out
@@ -634,17 +634,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Map 4 
@@ -653,17 +653,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -675,7 +675,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -793,17 +793,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Map 4 
@@ -812,17 +812,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -834,7 +834,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1594,17 +1594,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Map 4 
@@ -1613,17 +1613,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -1635,7 +1635,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
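
For reference, the hunks above reflect Calcite 1.13's stronger range-predicate simplification: the conjunction ((key < 8) and (key < 6)) is now folded to the tighter bound (key < 6) before row-count estimation, which is why the filter estimates move from 1 row to 3. A minimal HiveQL sketch of the query shape that produces this kind of plan -- the table name t and its contents are assumed for illustration, not taken from the .q file:

    -- assumed table: t(key int) with a handful of rows whose keys are below 10
    EXPLAIN
    SELECT count(*)
    FROM t a JOIN t b ON a.key = b.key
    WHERE a.key < 8 AND a.key < 6;   -- redundant upper bound; now simplified to key < 6 on both join sides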


[04/10] hive git commit: HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/perf/query6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query6.q.out b/ql/src/test/results/clientpositive/perf/query6.q.out
index 03fff79..6ea20e9 100644
--- a/ql/src/test/results/clientpositive/perf/query6.q.out
+++ b/ql/src/test/results/clientpositive/perf/query6.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[111][tables = [$hdt$_5, $hdt$_6]] in Stage 'Reducer 16' is a cross product
+Warning: Shuffle Join MERGEJOIN[111][tables = [$hdt$_5, $hdt$_6]] in Stage 'Reducer 12' is a cross product
 PREHOOK: query: explain
 select  a.ca_state state, count(*) cnt
  from customer_address a
@@ -52,16 +52,16 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Reducer 12 <- Map 11 (SIMPLE_EDGE), Map 13 (SIMPLE_EDGE)
-Reducer 15 <- Map 14 (SIMPLE_EDGE)
-Reducer 16 <- Reducer 15 (CUSTOM_SIMPLE_EDGE), Reducer 19 (CUSTOM_SIMPLE_EDGE)
-Reducer 17 <- Map 20 (SIMPLE_EDGE), Reducer 16 (SIMPLE_EDGE)
-Reducer 18 <- Map 14 (SIMPLE_EDGE)
-Reducer 19 <- Reducer 18 (CUSTOM_SIMPLE_EDGE)
+Reducer 10 <- Map 8 (SIMPLE_EDGE)
+Reducer 11 <- Reducer 10 (CUSTOM_SIMPLE_EDGE)
+Reducer 12 <- Reducer 11 (CUSTOM_SIMPLE_EDGE), Reducer 19 (CUSTOM_SIMPLE_EDGE)
+Reducer 13 <- Map 20 (SIMPLE_EDGE), Reducer 12 (SIMPLE_EDGE)
+Reducer 16 <- Map 15 (SIMPLE_EDGE), Map 17 (SIMPLE_EDGE)
+Reducer 19 <- Map 18 (SIMPLE_EDGE)
 Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE)
-Reducer 3 <- Map 10 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
-Reducer 4 <- Reducer 12 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
-Reducer 5 <- Reducer 17 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE)
+Reducer 3 <- Map 14 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
+Reducer 4 <- Reducer 16 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
+Reducer 5 <- Reducer 13 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE)
 Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
 Reducer 7 <- Reducer 6 (SIMPLE_EDGE)
 Reducer 9 <- Map 8 (SIMPLE_EDGE)
@@ -71,160 +71,156 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7
-      File Output Operator [FS_77]
-        Limit [LIM_76] (rows=100 width=88)
+      File Output Operator [FS_76]
+        Limit [LIM_75] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_75] (rows=42591679 width=88)
+          Select Operator [SEL_74] (rows=127775039 width=88)
             Output:["_col0","_col1"]
           <-Reducer 6 [SIMPLE_EDGE]
-            SHUFFLE [RS_74]
-              Filter Operator [FIL_72] (rows=42591679 width=88)
+            SHUFFLE [RS_73]
+              Filter Operator [FIL_71] (rows=127775039 width=88)
                 predicate:(_col1 >= 10)
-                Group By Operator [GBY_71] (rows=127775039 width=88)
+                Group By Operator [GBY_70] (rows=383325119 width=88)
                   Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
                 <-Reducer 5 [SIMPLE_EDGE]
-                  SHUFFLE [RS_70]
+                  SHUFFLE [RS_69]
                     PartitionCols:_col0
-                    Group By Operator [GBY_69] (rows=255550079 width=88)
-                      Output:["_col0","_col1"],aggregations:["count()"],keys:_col1
-                      Select Operator [SEL_68] (rows=255550079 width=88)
-                        Output:["_col1"]
-                        Filter Operator [FIL_67] (rows=255550079 width=88)
-                          predicate:(_col10 > (1.2 * CASE WHEN (_col15 is null) THEN (null) ELSE (_col14) END))
-                          Select Operator [SEL_66] (rows=766650239 width=88)
-                            Output:["_col1","_col10","_col14","_col15"]
-                            Merge Join Operator [MERGEJOIN_114] (rows=766650239 width=88)
-                              Conds:RS_63._col6=RS_64._col0(Inner),Output:["_col1","_col3","_col4","_col12"]
-                            <-Reducer 17 [SIMPLE_EDGE]
-                              SHUFFLE [RS_64]
-                                PartitionCols:_col0
-                                Select Operator [SEL_53] (rows=80353 width=1119)
-                                  Output:["_col0"]
-                                  Merge Join Operator [MERGEJOIN_112] (rows=80353 width=1119)
-                                    Conds:RS_50._col0=RS_51._col1(Inner),Output:["_col2"]
-                                  <-Map 20 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_51]
+                    Group By Operator [GBY_68] (rows=766650239 width=88)
+                      Output:["_col0","_col1"],aggregations:["count()"],keys:_col9
+                      Merge Join Operator [MERGEJOIN_114] (rows=766650239 width=88)
+                        Conds:RS_64._col4=RS_65._col0(Inner),Output:["_col9"]
+                      <-Reducer 13 [SIMPLE_EDGE]
+                        SHUFFLE [RS_65]
+                          PartitionCols:_col0
+                          Select Operator [SEL_54] (rows=169400 width=1436)
+                            Output:["_col0"]
+                            Filter Operator [FIL_53] (rows=169400 width=1436)
+                              predicate:(_col5 > (1.2 * CASE WHEN (_col1 is null) THEN (null) ELSE (_col0) END))
+                              Merge Join Operator [MERGEJOIN_112] (rows=508200 width=1436)
+                                Conds:RS_50._col2=RS_51._col2(Inner),Output:["_col0","_col1","_col4","_col5"]
+                              <-Map 20 [SIMPLE_EDGE]
+                                SHUFFLE [RS_51]
+                                  PartitionCols:_col2
+                                  Select Operator [SEL_46] (rows=462000 width=1436)
+                                    Output:["_col0","_col1","_col2"]
+                                    Filter Operator [FIL_107] (rows=462000 width=1436)
+                                      predicate:i_item_sk is not null
+                                      TableScan [TS_44] (rows=462000 width=1436)
+                                        default@item,i,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price","i_category"]
+                              <-Reducer 12 [SIMPLE_EDGE]
+                                SHUFFLE [RS_50]
+                                  PartitionCols:_col2
+                                  Merge Join Operator [MERGEJOIN_111] (rows=231000 width=1445)
+                                    Conds:(Inner),Output:["_col0","_col1","_col2"]
+                                  <-Reducer 11 [CUSTOM_SIMPLE_EDGE]
+                                    PARTITION_ONLY_SHUFFLE [RS_48]
+                                      Select Operator [SEL_43] (rows=1 width=8)
+                                        Filter Operator [FIL_42] (rows=1 width=8)
+                                          predicate:(sq_count_check(_col0) <= 1)
+                                          Group By Operator [GBY_40] (rows=1 width=8)
+                                            Output:["_col0"],aggregations:["count(VALUE._col0)"]
+                                          <-Reducer 10 [CUSTOM_SIMPLE_EDGE]
+                                            PARTITION_ONLY_SHUFFLE [RS_39]
+                                              Group By Operator [GBY_38] (rows=1 width=8)
+                                                Output:["_col0"],aggregations:["count()"]
+                                                Select Operator [SEL_36] (rows=9131 width=1119)
+                                                  Group By Operator [GBY_35] (rows=9131 width=1119)
+                                                    Output:["_col0"],keys:KEY._col0
+                                                  <-Map 8 [SIMPLE_EDGE]
+                                                    SHUFFLE [RS_34]
+                                                      PartitionCols:_col0
+                                                      Group By Operator [GBY_33] (rows=18262 width=1119)
+                                                        Output:["_col0"],keys:d_month_seq
+                                                        Select Operator [SEL_32] (rows=18262 width=1119)
+                                                          Output:["d_month_seq"]
+                                                          Filter Operator [FIL_106] (rows=18262 width=1119)
+                                                            predicate:((d_year = 2000) and (d_moy = 2))
+                                                            TableScan [TS_3] (rows=73049 width=1119)
+                                                              default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_month_seq","d_year","d_moy"]
+                                  <-Reducer 19 [CUSTOM_SIMPLE_EDGE]
+                                    PARTITION_ONLY_SHUFFLE [RS_47]
+                                      Select Operator [SEL_29] (rows=231000 width=1436)
+                                        Output:["_col0","_col1","_col2"]
+                                        Group By Operator [GBY_28] (rows=231000 width=1436)
+                                          Output:["_col0","_col1"],aggregations:["avg(VALUE._col0)"],keys:KEY._col0
+                                        <-Map 18 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_27]
+                                            PartitionCols:_col0
+                                            Group By Operator [GBY_26] (rows=462000 width=1436)
+                                              Output:["_col0","_col1"],aggregations:["avg(i_current_price)"],keys:i_category
+                                              Filter Operator [FIL_105] (rows=462000 width=1436)
+                                                predicate:i_category is not null
+                                                TableScan [TS_23] (rows=462000 width=1436)
+                                                  default@item,j,Tbl:COMPLETE,Col:NONE,Output:["i_current_price","i_category"]
+                      <-Reducer 4 [SIMPLE_EDGE]
+                        SHUFFLE [RS_64]
+                          PartitionCols:_col4
+                          Merge Join Operator [MERGEJOIN_113] (rows=696954748 width=88)
+                            Conds:RS_61._col5=RS_62._col0(Inner),Output:["_col4","_col9"]
+                          <-Reducer 16 [SIMPLE_EDGE]
+                            SHUFFLE [RS_62]
+                              PartitionCols:_col0
+                              Merge Join Operator [MERGEJOIN_110] (rows=88000001 width=860)
+                                Conds:RS_19._col1=RS_20._col0(Inner),Output:["_col0","_col3"]
+                              <-Map 15 [SIMPLE_EDGE]
+                                SHUFFLE [RS_19]
+                                  PartitionCols:_col1
+                                  Select Operator [SEL_15] (rows=80000000 width=860)
+                                    Output:["_col0","_col1"]
+                                    Filter Operator [FIL_103] (rows=80000000 width=860)
+                                      predicate:(c_current_addr_sk is not null and c_customer_sk is not null)
+                                      TableScan [TS_13] (rows=80000000 width=860)
+                                        default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_addr_sk"]
+                              <-Map 17 [SIMPLE_EDGE]
+                                SHUFFLE [RS_20]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_18] (rows=40000000 width=1014)
+                                    Output:["_col0","_col1"]
+                                    Filter Operator [FIL_104] (rows=40000000 width=1014)
+                                      predicate:ca_address_sk is not null
+                                      TableScan [TS_16] (rows=40000000 width=1014)
+                                        default@customer_address,a,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
+                          <-Reducer 3 [SIMPLE_EDGE]
+                            SHUFFLE [RS_61]
+                              PartitionCols:_col5
+                              Merge Join Operator [MERGEJOIN_109] (rows=633595212 width=88)
+                                Conds:RS_58._col0=RS_59._col0(Inner),Output:["_col4","_col5"]
+                              <-Map 14 [SIMPLE_EDGE]
+                                SHUFFLE [RS_59]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_12] (rows=575995635 width=88)
+                                    Output:["_col0","_col1","_col2"]
+                                    Filter Operator [FIL_102] (rows=575995635 width=88)
+                                      predicate:(ss_customer_sk is not null and ss_sold_date_sk is not null and ss_item_sk is not null)
+                                      TableScan [TS_10] (rows=575995635 width=88)
+                                        default@store_sales,s,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk"]
+                              <-Reducer 2 [SIMPLE_EDGE]
+                                SHUFFLE [RS_58]
+                                  PartitionCols:_col0
+                                  Merge Join Operator [MERGEJOIN_108] (rows=80353 width=1119)
+                                    Conds:RS_55._col1=RS_56._col0(Inner),Output:["_col0"]
+                                  <-Map 1 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_55]
                                       PartitionCols:_col1
-                                      Select Operator [SEL_46] (rows=73049 width=1119)
+                                      Select Operator [SEL_2] (rows=73049 width=1119)
                                         Output:["_col0","_col1"]
-                                        Filter Operator [FIL_107] (rows=73049 width=1119)
+                                        Filter Operator [FIL_100] (rows=73049 width=1119)
                                           predicate:(d_date_sk is not null and d_month_seq is not null)
-                                          TableScan [TS_44] (rows=73049 width=1119)
+                                          TableScan [TS_0] (rows=73049 width=1119)
                                             default@date_dim,d,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_month_seq"]
-                                  <-Reducer 16 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_50]
+                                  <-Reducer 9 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_56]
                                       PartitionCols:_col0
-                                      Merge Join Operator [MERGEJOIN_111] (rows=9131 width=1128)
-                                        Conds:(Inner),Output:["_col0"]
-                                      <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
-                                        PARTITION_ONLY_SHUFFLE [RS_47]
-                                          Group By Operator [GBY_28] (rows=9131 width=1119)
-                                            Output:["_col0"],keys:KEY._col0
-                                          <-Map 14 [SIMPLE_EDGE]
-                                            SHUFFLE [RS_27]
-                                              PartitionCols:_col0
-                                              Group By Operator [GBY_26] (rows=18262 width=1119)
-                                                Output:["_col0"],keys:d_month_seq
-                                                Select Operator [SEL_25] (rows=18262 width=1119)
-                                                  Output:["d_month_seq"]
-                                                  Filter Operator [FIL_105] (rows=18262 width=1119)
-                                                    predicate:((d_year = 2000) and (d_moy = 2) and d_month_seq is not null)
-                                                    TableScan [TS_23] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_month_seq","d_year","d_moy"]
-                                      <-Reducer 19 [CUSTOM_SIMPLE_EDGE]
-                                        PARTITION_ONLY_SHUFFLE [RS_48]
-                                          Select Operator [SEL_43] (rows=1 width=8)
-                                            Filter Operator [FIL_42] (rows=1 width=8)
-                                              predicate:(sq_count_check(_col0) <= 1)
-                                              Group By Operator [GBY_40] (rows=1 width=8)
-                                                Output:["_col0"],aggregations:["count(VALUE._col0)"]
-                                              <-Reducer 18 [CUSTOM_SIMPLE_EDGE]
-                                                PARTITION_ONLY_SHUFFLE [RS_39]
-                                                  Group By Operator [GBY_38] (rows=1 width=8)
-                                                    Output:["_col0"],aggregations:["count()"]
-                                                    Select Operator [SEL_36] (rows=9131 width=1119)
-                                                      Group By Operator [GBY_35] (rows=9131 width=1119)
-                                                        Output:["_col0"],keys:KEY._col0
-                                                      <-Map 14 [SIMPLE_EDGE]
-                                                        SHUFFLE [RS_34]
-                                                          PartitionCols:_col0
-                                                          Group By Operator [GBY_33] (rows=18262 width=1119)
-                                                            Output:["_col0"],keys:d_month_seq
-                                                            Select Operator [SEL_32] (rows=18262 width=1119)
-                                                              Output:["d_month_seq"]
-                                                              Filter Operator [FIL_106] (rows=18262 width=1119)
-                                                                predicate:((d_year = 2000) and (d_moy = 2))
-                                                                 Please refer to the previous TableScan [TS_23]
-                            <-Reducer 4 [SIMPLE_EDGE]
-                              SHUFFLE [RS_63]
-                                PartitionCols:_col6
-                                Merge Join Operator [MERGEJOIN_113] (rows=696954748 width=88)
-                                  Conds:RS_60._col8=RS_61._col0(Inner),Output:["_col1","_col3","_col4","_col6","_col12"]
-                                <-Reducer 12 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_61]
-                                    PartitionCols:_col0
-                                    Merge Join Operator [MERGEJOIN_110] (rows=88000001 width=860)
-                                      Conds:RS_19._col1=RS_20._col0(Inner),Output:["_col0","_col3"]
-                                    <-Map 11 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_19]
-                                        PartitionCols:_col1
-                                        Select Operator [SEL_15] (rows=80000000 width=860)
-                                          Output:["_col0","_col1"]
-                                          Filter Operator [FIL_103] (rows=80000000 width=860)
-                                            predicate:(c_current_addr_sk is not null and c_customer_sk is not null)
-                                            TableScan [TS_13] (rows=80000000 width=860)
-                                              default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_addr_sk"]
-                                    <-Map 13 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_20]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_18] (rows=40000000 width=1014)
-                                          Output:["_col0","_col1"]
-                                          Filter Operator [FIL_104] (rows=40000000 width=1014)
-                                            predicate:ca_address_sk is not null
-                                            TableScan [TS_16] (rows=40000000 width=1014)
-                                              default@customer_address,a,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
-                                <-Reducer 3 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_60]
-                                    PartitionCols:_col8
-                                    Merge Join Operator [MERGEJOIN_109] (rows=633595212 width=88)
-                                      Conds:RS_57._col0=RS_58._col1(Inner),Output:["_col1","_col3","_col4","_col6","_col8"]
-                                    <-Map 10 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_58]
-                                        PartitionCols:_col1
-                                        Select Operator [SEL_12] (rows=575995635 width=88)
-                                          Output:["_col0","_col1","_col2"]
-                                          Filter Operator [FIL_102] (rows=575995635 width=88)
-                                            predicate:(ss_customer_sk is not null and ss_sold_date_sk is not null and ss_item_sk is not null)
-                                            TableScan [TS_10] (rows=575995635 width=88)
-                                              default@store_sales,s,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk"]
-                                    <-Reducer 2 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_57]
-                                        PartitionCols:_col0
-                                        Merge Join Operator [MERGEJOIN_108] (rows=508200 width=1436)
-                                          Conds:RS_54._col2=RS_55._col2(Left Outer),Output:["_col0","_col1","_col3","_col4"]
-                                        <-Map 1 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_54]
-                                            PartitionCols:_col2
-                                            Select Operator [SEL_2] (rows=462000 width=1436)
-                                              Output:["_col0","_col1","_col2"]
-                                              Filter Operator [FIL_100] (rows=462000 width=1436)
-                                                predicate:i_item_sk is not null
-                                                TableScan [TS_0] (rows=462000 width=1436)
-                                                  default@item,i,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price","i_category"]
-                                        <-Reducer 9 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_55]
-                                            PartitionCols:_col2
-                                            Select Operator [SEL_9] (rows=231000 width=1436)
-                                              Output:["_col0","_col1","_col2"]
-                                              Group By Operator [GBY_8] (rows=231000 width=1436)
-                                                Output:["_col0","_col1"],aggregations:["avg(VALUE._col0)"],keys:KEY._col0
-                                              <-Map 8 [SIMPLE_EDGE]
-                                                SHUFFLE [RS_7]
-                                                  PartitionCols:_col0
-                                                  Group By Operator [GBY_6] (rows=462000 width=1436)
-                                                    Output:["_col0","_col1"],aggregations:["avg(i_current_price)"],keys:i_category
-                                                    Filter Operator [FIL_101] (rows=462000 width=1436)
-                                                      predicate:i_category is not null
-                                                      TableScan [TS_3] (rows=462000 width=1436)
-                                                        default@item,j,Tbl:COMPLETE,Col:NONE,Output:["i_current_price","i_category"]
+                                      Group By Operator [GBY_8] (rows=9131 width=1119)
+                                        Output:["_col0"],keys:KEY._col0
+                                      <-Map 8 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_7]
+                                          PartitionCols:_col0
+                                          Group By Operator [GBY_6] (rows=18262 width=1119)
+                                            Output:["_col0"],keys:d_month_seq
+                                            Select Operator [SEL_5] (rows=18262 width=1119)
+                                              Output:["d_month_seq"]
+                                              Filter Operator [FIL_101] (rows=18262 width=1119)
+                                                predicate:((d_year = 2000) and (d_moy = 2) and d_month_seq is not null)
+                                                 Please refer to the previous TableScan [TS_3]
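
In the updated query6 plan, the comparison against 1.2 times the per-category average item price (FIL_53) is applied on the item side in Reducer 13 before the result is joined back to store_sales, whereas the old plan evaluated it after the full join (FIL_67 in Reducer 5); the sq_count_check guard and the cross product remain, only renumbered onto Reducer 12. The correlated piece being turned into a join on i_category is, schematically, the pattern below -- a stripped-down sketch of just that fragment (query6 additionally joins store_sales, customer, customer_address and date_dim), not the exact benchmark text:

    -- item i is the outer table; j supplies the per-category average price
    EXPLAIN
    SELECT i.i_item_sk
    FROM item i
    WHERE i.i_current_price > 1.2 *
          (SELECT avg(j.i_current_price)
           FROM item j
           WHERE j.i_category = i.i_category);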
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/perf/query81.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query81.q.out b/ql/src/test/results/clientpositive/perf/query81.q.out
index c7571fe..8580377 100644
--- a/ql/src/test/results/clientpositive/perf/query81.q.out
+++ b/ql/src/test/results/clientpositive/perf/query81.q.out
@@ -77,28 +77,28 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 4
-      File Output Operator [FS_68]
-        Select Operator [SEL_67] (rows=100 width=860)
+      File Output Operator [FS_67]
+        Select Operator [SEL_66] (rows=100 width=860)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15"]
-          Limit [LIM_66] (rows=100 width=860)
+          Limit [LIM_65] (rows=100 width=860)
             Number of rows:100
-            Select Operator [SEL_65] (rows=32266667 width=860)
+            Select Operator [SEL_64] (rows=96800003 width=860)
               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
             <-Reducer 3 [SIMPLE_EDGE]
-              SHUFFLE [RS_64]
-                Select Operator [SEL_63] (rows=32266667 width=860)
+              SHUFFLE [RS_63]
+                Select Operator [SEL_62] (rows=96800003 width=860)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
-                  Filter Operator [FIL_62] (rows=32266667 width=860)
-                    predicate:(_col2 > CASE WHEN (_col22 is null) THEN (null) ELSE (_col21) END)
-                    Select Operator [SEL_61] (rows=96800003 width=860)
-                      Output:["_col2","_col4","_col5","_col6","_col7","_col8","_col9","_col11","_col12","_col13","_col14","_col16","_col18","_col19","_col20","_col21","_col22"]
-                      Merge Join Operator [MERGEJOIN_106] (rows=96800003 width=860)
-                        Conds:RS_58._col0=RS_59._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col14","_col15","_col16","_col17","_col20","_col21","_col22"]
-                      <-Reducer 10 [SIMPLE_EDGE]
-                        SHUFFLE [RS_59]
-                          PartitionCols:_col0
-                          Merge Join Operator [MERGEJOIN_105] (rows=24200000 width=1014)
-                            Conds:RS_51._col1=RS_52._col2(Left Outer),Output:["_col0","_col2","_col3","_col4"]
+                  Merge Join Operator [MERGEJOIN_108] (rows=96800003 width=860)
+                    Conds:RS_59._col0=RS_60._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col14","_col15","_col16","_col17","_col20"]
+                  <-Reducer 10 [SIMPLE_EDGE]
+                    SHUFFLE [RS_60]
+                      PartitionCols:_col0
+                      Select Operator [SEL_55] (rows=8066666 width=1014)
+                        Output:["_col0","_col2"]
+                        Filter Operator [FIL_54] (rows=8066666 width=1014)
+                          predicate:(_col2 > CASE WHEN (_col4 is null) THEN (null) ELSE (_col3) END)
+                          Merge Join Operator [MERGEJOIN_107] (rows=24200000 width=1014)
+                            Conds:RS_51._col1=RS_52._col2(Inner),Output:["_col0","_col2","_col3","_col4"]
                           <-Reducer 13 [SIMPLE_EDGE]
                             SHUFFLE [RS_52]
                               PartitionCols:_col2
@@ -115,28 +115,28 @@ Stage-0
                                         PartitionCols:_col0
                                         Group By Operator [GBY_42] (rows=44000000 width=1014)
                                           Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col7, _col1
-                                          Merge Join Operator [MERGEJOIN_104] (rows=44000000 width=1014)
+                                          Merge Join Operator [MERGEJOIN_106] (rows=44000000 width=1014)
                                             Conds:RS_38._col2=RS_39._col0(Inner),Output:["_col1","_col3","_col7"]
                                           <-Map 15 [SIMPLE_EDGE]
                                             SHUFFLE [RS_39]
                                               PartitionCols:_col0
                                               Select Operator [SEL_34] (rows=40000000 width=1014)
                                                 Output:["_col0","_col1"]
-                                                Filter Operator [FIL_99] (rows=40000000 width=1014)
+                                                Filter Operator [FIL_101] (rows=40000000 width=1014)
                                                   predicate:(ca_address_sk is not null and ca_state is not null)
                                                   TableScan [TS_12] (rows=40000000 width=1014)
                                                     default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
                                           <-Reducer 11 [SIMPLE_EDGE]
                                             SHUFFLE [RS_38]
                                               PartitionCols:_col2
-                                              Merge Join Operator [MERGEJOIN_103] (rows=31678769 width=106)
+                                              Merge Join Operator [MERGEJOIN_105] (rows=31678769 width=106)
                                                 Conds:RS_35._col0=RS_36._col0(Inner),Output:["_col1","_col2","_col3"]
                                               <-Map 14 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_36]
                                                   PartitionCols:_col0
                                                   Select Operator [SEL_11] (rows=36524 width=1119)
                                                     Output:["_col0"]
-                                                    Filter Operator [FIL_95] (rows=36524 width=1119)
+                                                    Filter Operator [FIL_97] (rows=36524 width=1119)
                                                       predicate:((d_year = 1998) and d_date_sk is not null)
                                                       TableScan [TS_9] (rows=73049 width=1119)
                                                         default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
@@ -145,7 +145,7 @@ Stage-0
                                                   PartitionCols:_col0
                                                   Select Operator [SEL_28] (rows=28798881 width=106)
                                                     Output:["_col0","_col1","_col2","_col3"]
-                                                    Filter Operator [FIL_97] (rows=28798881 width=106)
+                                                    Filter Operator [FIL_99] (rows=28798881 width=106)
                                                       predicate:(cr_returned_date_sk is not null and cr_returning_addr_sk is not null)
                                                       TableScan [TS_6] (rows=28798881 width=106)
                                                         default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_returned_date_sk","cr_returning_customer_sk","cr_returning_addr_sk","cr_return_amt_inc_tax"]
@@ -161,20 +161,20 @@ Stage-0
                                     PartitionCols:_col0, _col1
                                     Group By Operator [GBY_22] (rows=44000000 width=1014)
                                       Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col7, _col1
-                                      Merge Join Operator [MERGEJOIN_102] (rows=44000000 width=1014)
+                                      Merge Join Operator [MERGEJOIN_104] (rows=44000000 width=1014)
                                         Conds:RS_18._col2=RS_19._col0(Inner),Output:["_col1","_col3","_col7"]
                                       <-Map 15 [SIMPLE_EDGE]
                                         SHUFFLE [RS_19]
                                           PartitionCols:_col0
                                           Select Operator [SEL_14] (rows=40000000 width=1014)
                                             Output:["_col0","_col1"]
-                                            Filter Operator [FIL_96] (rows=40000000 width=1014)
+                                            Filter Operator [FIL_98] (rows=40000000 width=1014)
                                               predicate:ca_address_sk is not null
                                                Please refer to the previous TableScan [TS_12]
                                       <-Reducer 7 [SIMPLE_EDGE]
                                         SHUFFLE [RS_18]
                                           PartitionCols:_col2
-                                          Merge Join Operator [MERGEJOIN_101] (rows=31678769 width=106)
+                                          Merge Join Operator [MERGEJOIN_103] (rows=31678769 width=106)
                                             Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col1","_col2","_col3"]
                                           <-Map 14 [SIMPLE_EDGE]
                                             SHUFFLE [RS_16]
@@ -185,30 +185,30 @@ Stage-0
                                               PartitionCols:_col0
                                               Select Operator [SEL_8] (rows=28798881 width=106)
                                                 Output:["_col0","_col1","_col2","_col3"]
-                                                Filter Operator [FIL_94] (rows=28798881 width=106)
+                                                Filter Operator [FIL_96] (rows=28798881 width=106)
                                                   predicate:(cr_returned_date_sk is not null and cr_returning_addr_sk is not null and cr_returning_customer_sk is not null)
                                                    Please refer to the previous TableScan [TS_6]
-                      <-Reducer 2 [SIMPLE_EDGE]
-                        SHUFFLE [RS_58]
+                  <-Reducer 2 [SIMPLE_EDGE]
+                    SHUFFLE [RS_59]
+                      PartitionCols:_col0
+                      Merge Join Operator [MERGEJOIN_102] (rows=88000001 width=860)
+                        Conds:RS_56._col2=RS_57._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col14","_col15","_col16","_col17"]
+                      <-Map 1 [SIMPLE_EDGE]
+                        SHUFFLE [RS_56]
+                          PartitionCols:_col2
+                          Select Operator [SEL_2] (rows=80000000 width=860)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                            Filter Operator [FIL_94] (rows=80000000 width=860)
+                              predicate:(c_customer_sk is not null and c_current_addr_sk is not null)
+                              TableScan [TS_0] (rows=80000000 width=860)
+                                default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_customer_id","c_current_addr_sk","c_salutation","c_first_name","c_last_name"]
+                      <-Map 5 [SIMPLE_EDGE]
+                        SHUFFLE [RS_57]
                           PartitionCols:_col0
-                          Merge Join Operator [MERGEJOIN_100] (rows=88000001 width=860)
-                            Conds:RS_55._col2=RS_56._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col14","_col15","_col16","_col17"]
-                          <-Map 1 [SIMPLE_EDGE]
-                            SHUFFLE [RS_55]
-                              PartitionCols:_col2
-                              Select Operator [SEL_2] (rows=80000000 width=860)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                Filter Operator [FIL_92] (rows=80000000 width=860)
-                                  predicate:(c_customer_sk is not null and c_current_addr_sk is not null)
-                                  TableScan [TS_0] (rows=80000000 width=860)
-                                    default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_customer_id","c_current_addr_sk","c_salutation","c_first_name","c_last_name"]
-                          <-Map 5 [SIMPLE_EDGE]
-                            SHUFFLE [RS_56]
-                              PartitionCols:_col0
-                              Select Operator [SEL_5] (rows=20000000 width=1014)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col8","_col9","_col10","_col11"]
-                                Filter Operator [FIL_93] (rows=20000000 width=1014)
-                                  predicate:((ca_state = 'IL') and ca_address_sk is not null)
-                                  TableScan [TS_3] (rows=40000000 width=1014)
-                                    default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_street_number","ca_street_name","ca_street_type","ca_suite_number","ca_city","ca_county","ca_state","ca_zip","ca_country","ca_gmt_offset","ca_location_type"]
+                          Select Operator [SEL_5] (rows=20000000 width=1014)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col8","_col9","_col10","_col11"]
+                            Filter Operator [FIL_95] (rows=20000000 width=1014)
+                              predicate:((ca_state = 'IL') and ca_address_sk is not null)
+                              TableScan [TS_3] (rows=40000000 width=1014)
+                                default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_street_number","ca_street_name","ca_street_type","ca_suite_number","ca_city","ca_county","ca_state","ca_zip","ca_country","ca_gmt_offset","ca_location_type"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_gby.q.out b/ql/src/test/results/clientpositive/ppd_gby.q.out
index a36bb60..d211cf0 100644
--- a/ql/src/test/results/clientpositive/ppd_gby.q.out
+++ b/ql/src/test/results/clientpositive/ppd_gby.q.out
@@ -22,19 +22,19 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((value > 'val_10') and (value > 'val_200')) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (value > 'val_200') (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(key)
                 keys: value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -42,17 +42,17 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((_col1 > 30) or (_col0 < 'val_400')) (type: boolean)
-            Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -231,19 +231,19 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((value > 'val_10') and (value > 'val_200')) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (value > 'val_200') (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(key)
                 keys: value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -251,17 +251,17 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((_col1 > 30) or (_col0 < 'val_400')) (type: boolean)
-            Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
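
The ppd_gby hunks show the same effect for pushed-down string predicates: the stacked conjunction ((value > 'val_10') and (value > 'val_200')) collapses to the stronger bound (value > 'val_200'), so the post-filter row estimate rises from 55 to 166 of the 500 src rows. A hedged sketch of a query shape that yields this plan, using Hive's standard src(key string, value string) test table; the WHERE placement is reconstructed from the operators above, not copied from ppd_gby.q:

    EXPLAIN
    SELECT src1.c1
    FROM (SELECT value AS c1, count(key) AS c2
          FROM src
          WHERE value > 'val_10'        -- weaker bound, subsumed after pushdown
          GROUP BY value) src1
    WHERE src1.c1 > 'val_200'           -- pushed below the group-by into the scan filter
      AND (src1.c2 > 30 OR src1.c1 < 'val_400');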

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_gby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_gby2.q.out b/ql/src/test/results/clientpositive/ppd_gby2.q.out
index d434905..3bd242b 100644
--- a/ql/src/test/results/clientpositive/ppd_gby2.q.out
+++ b/ql/src/test/results/clientpositive/ppd_gby2.q.out
@@ -25,19 +25,19 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((value > 'val_10') and (value > 'val_200')) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (value > 'val_200') (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(key)
                 keys: value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -45,16 +45,16 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((_col1 > 30) or (_col0 < 'val_400')) (type: boolean)
-            Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: max(_col0)
               keys: _col1 (type: bigint)
               mode: hash
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
                 table:
@@ -70,7 +70,7 @@ STAGE PLANS:
               key expressions: _col0 (type: bigint)
               sort order: +
               Map-reduce partition columns: _col0 (type: bigint)
-              Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Group By Operator
@@ -78,14 +78,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: bigint)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: string), _col0 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -145,19 +145,19 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((value > 'val_10') and (value > 'val_200')) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (value > 'val_200') (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(key)
                 keys: value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -165,16 +165,16 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((_col1 > 30) or (_col0 < 'val_400')) (type: boolean)
-            Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: max(_col0)
               keys: _col1 (type: bigint)
               mode: hash
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
                 table:
@@ -190,7 +190,7 @@ STAGE PLANS:
               key expressions: _col0 (type: bigint)
               sort order: +
               Map-reduce partition columns: _col0 (type: bigint)
-              Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Group By Operator
@@ -198,14 +198,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: bigint)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: string), _col0 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 9 Data size: 95 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_gby_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_gby_join.q.out b/ql/src/test/results/clientpositive/ppd_gby_join.q.out
index ed91def..8519174 100644
--- a/ql/src/test/results/clientpositive/ppd_gby_join.q.out
+++ b/ql/src/test/results/clientpositive/ppd_gby_join.q.out
@@ -31,32 +31,32 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
-              Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '400') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
+              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '400') and (key <> '4') and (key > '20')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -65,20 +65,20 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((_col1 > '50') or (_col0 < '50')) (type: boolean)
-            Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   table:
@@ -94,7 +94,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -102,10 +102,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 20 Data size: 214 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 20 Data size: 214 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -301,32 +301,32 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
-              Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '400') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
+              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '400') and (key <> '4') and (key > '20')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -335,20 +335,20 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((_col1 > '50') or (_col0 < '50')) (type: boolean)
-            Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   table:
@@ -364,7 +364,7 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -372,10 +372,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 20 Data size: 214 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 20 Data size: 214 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join.q.out b/ql/src/test/results/clientpositive/ppd_join.q.out
index e48c5e2..0d09633 100644
--- a/ql/src/test/results/clientpositive/ppd_join.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join.q.out
@@ -28,32 +28,32 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
-              Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '400') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
+              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '400') and (key <> '4') and (key > '20')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -63,17 +63,17 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((_col1 > '50') or (_col0 < '50')) (type: boolean)
-            Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col2 (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -553,32 +553,32 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
-              Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '400') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
+              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '400') and (key <> '4') and (key > '20')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -588,17 +588,17 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((_col1 > '50') or (_col0 < '50')) (type: boolean)
-            Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col2 (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join1.q.out b/ql/src/test/results/clientpositive/ppd_outer_join1.q.out
index f4d8089..38da828 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join1.q.out
@@ -28,33 +28,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -64,10 +64,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -135,33 +135,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -171,10 +171,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat


http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join2.q.out b/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
index 8ce86d1..403e883 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
@@ -28,33 +28,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '20') and (key > '15')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '15') and (key < '20')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -64,10 +64,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -255,33 +255,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '20') and (key > '15')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '15') and (key < '20')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -291,10 +291,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join3.q.out b/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
index b8b6168..f7d5a0c 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
@@ -28,33 +28,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '20') and (key > '15')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '15') and (key < '20')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -64,10 +64,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -255,33 +255,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '20') and (key > '15')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '15') and (key < '20')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -291,10 +291,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
index e8e9b55..053acb5 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
@@ -34,48 +34,48 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: c
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((sqrt(key) <> 13.0) and (key < '20') and (key > '15')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '15') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -87,14 +87,14 @@ STAGE PLANS:
             1 _col0 (type: string)
             2 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string), _col2 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -398,48 +398,48 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '20') and (key > '15') and (sqrt(key) <> 13.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: c
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((sqrt(key) <> 13.0) and (key < '20') and (key > '15')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '15') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -451,14 +451,14 @@ STAGE PLANS:
             1 _col0 (type: string)
             2 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string), _col2 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_union.q.out b/ql/src/test/results/clientpositive/ppd_union.q.out
index 6f231b8..b56c4e0 100644
--- a/ql/src/test/results/clientpositive/ppd_union.q.out
+++ b/ql/src/test/results/clientpositive/ppd_union.q.out
@@ -28,40 +28,19 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < '100') and (key > '4') and (value > 'val_4')) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '4') and (value > 'val_4')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                Union
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '150') and (key > '4') and (value > 'val_4')) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                Union
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -290,40 +269,19 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < '100') and (key > '4') and (value > 'val_4')) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                Union
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '150') and (key > '4') and (value > 'val_4')) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key > '4') and (value > 'val_4')) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                Union
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/ppd_vc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_vc.q.out b/ql/src/test/results/clientpositive/ppd_vc.q.out
index 86ab427..1569cf6 100644
--- a/ql/src/test/results/clientpositive/ppd_vc.q.out
+++ b/ql/src/test/results/clientpositive/ppd_vc.q.out
@@ -350,18 +350,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((BLOCK__OFFSET__INSIDE__FILE < 100) and (BLOCK__OFFSET__INSIDE__FILE < 50) and key is not null) (type: boolean)
-              Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((BLOCK__OFFSET__INSIDE__FILE < 50) and key is not null) (type: boolean)
+              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), ds (type: string), hr (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: bigint)
                   auto parallelism: false
@@ -621,11 +621,11 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 732 Data size: 7782 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: bigint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 732 Data size: 7782 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
@@ -654,7 +654,7 @@ STAGE PLANS:
               key expressions: _col2 (type: string), _col3 (type: string), _col4 (type: bigint)
               null sort order: aaa
               sort order: +++
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 732 Data size: 7782 Basic stats: COMPLETE Column stats: NONE
               tag: -1
               value expressions: _col0 (type: string), _col1 (type: string)
               auto parallelism: false
@@ -690,13 +690,13 @@ STAGE PLANS:
         Select Operator
           expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: bigint)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 732 Data size: 7782 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 732 Data size: 7782 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/rcfile_null_value.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rcfile_null_value.q.out b/ql/src/test/results/clientpositive/rcfile_null_value.q.out
index f3ab47c..12b7140 100644
--- a/ql/src/test/results/clientpositive/rcfile_null_value.q.out
+++ b/ql/src/test/results/clientpositive/rcfile_null_value.q.out
@@ -100,17 +100,17 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: src2

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/router_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/router_join_ppr.q.out b/ql/src/test/results/clientpositive/router_join_ppr.q.out
index f17959b..615d61a 100644
--- a/ql/src/test/results/clientpositive/router_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/router_join_ppr.q.out
@@ -30,18 +30,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -51,18 +51,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -224,13 +224,13 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -324,18 +324,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -345,18 +345,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -518,17 +518,17 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -622,18 +622,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -643,18 +643,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -816,13 +816,13 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -916,18 +916,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -937,18 +937,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -1110,17 +1110,17 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/auto_join12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join12.q.out b/ql/src/test/results/clientpositive/spark/auto_join12.q.out
index 15a94b1..c56e8da 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join12.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join12.q.out
@@ -36,12 +36,12 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) < 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
@@ -55,12 +55,12 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)
@@ -81,12 +81,12 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -99,11 +99,11 @@ STAGE PLANS:
                         input vertices:
                           1 Map 3
                           2 Map 4
-                        Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: hash(_col0,_col3) (type: int)
                           outputColumnNames: _col0
-                          Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                           Group By Operator
                             aggregations: sum(_col0)
                             mode: hash

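The revised row-count estimates track the number of surviving conjuncts: with no
column statistics available (Column stats: NONE), Hive's filter statistics
annotation appears to charge a fixed selectivity of roughly 1/3 per range
comparison, so dropping redundant conjuncts raises the estimate. A
back-of-the-envelope check against the 500-row src scans in these hunks (an
assumption about the heuristic, not a statement of the exact formula; results
integer-truncated):

    500 * 1/3     -> 166   -- one conjunct,  e.g. (UDFToDouble(key) < 80.0)
    500 * (1/3)^2 ->  55   -- two conjuncts, e.g. (key > '15') and (key < '20')
    500 * (1/3)^3 ->  18   -- the old three-conjunct predicates
    500 * (1/3)^4 ->   6   -- the old four-conjunct predicates
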
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/auto_join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join16.q.out b/ql/src/test/results/clientpositive/spark/auto_join16.q.out
index 4217eeb..c9ccc9b 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join16.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join16.q.out
@@ -30,12 +30,12 @@ STAGE PLANS:
                   alias: tab
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-                    Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string), _col1 (type: string)
@@ -55,12 +55,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-                    Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -70,11 +70,11 @@ STAGE PLANS:
                         outputColumnNames: _col0, _col3
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                         Select Operator
                           expressions: hash(_col0,_col3) (type: int)
                           outputColumnNames: _col0
-                          Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                           Group By Operator
                             aggregations: sum(_col0)
                             mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/auto_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join4.q.out b/ql/src/test/results/clientpositive/spark/auto_join4.q.out
index b212b54..ca71d9f 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join4.q.out
@@ -53,12 +53,12 @@ STAGE PLANS:
                   alias: src2
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/auto_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join5.q.out b/ql/src/test/results/clientpositive/spark/auto_join5.q.out
index 4226238..2b11a96 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join5.q.out
@@ -53,12 +53,12 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join8.q.out b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
index 5fdc5df..c079536 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
@@ -53,12 +53,12 @@ STAGE PLANS:
                   alias: src2
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out
index 602ccb2..7a2422f 100644
--- a/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out
@@ -562,19 +562,19 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Sorted Merge Bucket Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -689,19 +689,19 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Sorted Merge Bucket Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -1219,19 +1219,19 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Sorted Merge Bucket Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash

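The same simplification shows up over the 10-row bucketed tables used by the SMB
map-join tests: (key < 8) and (key < 6) reduces to (key < 6), and with the
per-conjunct selectivity sketched above the estimate moves from roughly
10 * (1/3)^2 = 1 row to 10 * 1/3 = 3 rows. A hypothetical sketch of the shape of
these queries (table names assumed for illustration, not the exact
auto_smb_mapjoin_14.q text):

    SELECT count(*)
    FROM tbl1 a
    JOIN tbl2 b ON a.key = b.key
    WHERE a.key < 8
      AND b.key < 6;   -- a.key = b.key makes both bounds apply to both scans;
                       -- Calcite 1.13 then keeps only (key < 6) on each side
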
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out
index cdb69db..8a302b6 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out
@@ -699,19 +699,19 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Sorted Merge Bucket Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -826,19 +826,19 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Sorted Merge Bucket Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -1448,19 +1448,19 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Sorted Merge Bucket Map Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -2350,12 +2350,12 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: int)
@@ -2375,12 +2375,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -2389,7 +2389,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -2505,12 +2505,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: int)
@@ -2530,12 +2530,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -2544,7 +2544,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -3167,12 +3167,12 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: int)
@@ -3192,12 +3192,12 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 21 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -3206,7 +3206,7 @@ STAGE PLANS:
                           1 _col0 (type: int)
                         input vertices:
                           1 Map 3
-                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
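
Note on the golden-file updates above: after the Calcite upgrade, the redundant range predicates that the earlier plans carried, e.g. ((key < 8) and (key < 6)), are simplified to the tighter bound (key < 6), and the row-count and data-size estimates change accordingly. A minimal standalone sketch of that merge in plain Java (illustration only, not the optimizer's actual code; the class and method names are made up):

    import java.util.Optional;

    public class RangeMergeSketch {

      // Two strict upper bounds on the same column reduce to the tighter one.
      static Optional<Integer> mergeUpperBounds(Optional<Integer> a, Optional<Integer> b) {
        if (!a.isPresent()) {
          return b;
        }
        if (!b.isPresent()) {
          return a;
        }
        return Optional.of(Math.min(a.get(), b.get()));
      }

      public static void main(String[] args) {
        Optional<Integer> merged = mergeUpperBounds(Optional.of(8), Optional.of(6));
        // Prints "key < 6", matching the simplified predicate in the updated plans.
        System.out.println("key < " + merged.get());
      }
    }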


[08/10] hive git commit: HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index a43d2be..eab8f6f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -18,15 +18,15 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
 import java.math.BigDecimal;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.Calendar;
 
 import org.apache.calcite.adapter.druid.DruidQuery;
 import org.apache.calcite.rel.RelNode;
 import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.DateString;
+import org.apache.calcite.util.TimeString;
+import org.apache.calcite.util.TimestampString;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.conf.Constants;
@@ -37,8 +37,6 @@ import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.ParseDriver;
 import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 
 public class ASTBuilder {
 
@@ -270,26 +268,17 @@ public class ASTBuilder {
       val = literal.getValue3();
       type = ((Boolean) val).booleanValue() ? HiveParser.KW_TRUE : HiveParser.KW_FALSE;
       break;
-    case DATE: {
-      //Calcite Calendar is always GMT, Hive atm uses JVM local
-      final Calendar c = (Calendar) literal.getValue();
-      final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
+    case DATE:
+      val = "'" + literal.getValueAs(DateString.class).toString() + "'";
       type = HiveParser.TOK_DATELITERAL;
-      DateFormat df = new SimpleDateFormat("yyyy-MM-dd");
-      val = df.format(dt.toDateTime(DateTimeZone.getDefault()).toDate());
-      val = "'" + val + "'";
-    }
       break;
     case TIME:
-    case TIMESTAMP: {
-      //Calcite Calendar is always GMT, Hive atm uses JVM local
-      final Calendar c = (Calendar) literal.getValue();
-      final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
+      val = "'" + literal.getValueAs(TimeString.class).toString() + "'";
+      type = HiveParser.TOK_TIMESTAMPLITERAL;
+      break;
+    case TIMESTAMP:
+      val = "'" + literal.getValueAs(TimestampString.class).toString() + "'";
       type = HiveParser.TOK_TIMESTAMPLITERAL;
-      DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
-      val = df.format(dt.toDateTime(DateTimeZone.getDefault()).toDate());
-      val = "'" + val + "'";
-    }
       break;
     case INTERVAL_YEAR:
     case INTERVAL_MONTH:

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index b1efbbd..f974cc9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -18,9 +18,9 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
 import java.math.BigDecimal;
+import java.sql.Date;
 import java.sql.Timestamp;
 import java.util.ArrayList;
-import java.util.Calendar;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
@@ -42,6 +42,9 @@ import org.apache.calcite.rex.RexWindow;
 import org.apache.calcite.rex.RexWindowBound;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.type.SqlTypeUtil;
+import org.apache.calcite.util.DateString;
+import org.apache.calcite.util.TimeString;
+import org.apache.calcite.util.TimestampString;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
@@ -74,8 +77,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -292,18 +293,15 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
       case DOUBLE:
         return new ExprNodeConstantDesc(TypeInfoFactory.doubleTypeInfo,
             Double.valueOf(((Number) literal.getValue3()).doubleValue()));
-      case DATE: {
-        final Calendar c = (Calendar) literal.getValue();
+      case DATE:
         return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo,
-            new java.sql.Date(c.getTimeInMillis()));
-      }
+            Date.valueOf(literal.getValueAs(DateString.class).toString()));
       case TIME:
-      case TIMESTAMP: {
-        final Calendar c = (Calendar) literal.getValue();
-        final DateTime dt = new DateTime(c.getTimeInMillis(), DateTimeZone.forTimeZone(c.getTimeZone()));
         return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo,
-            new Timestamp(dt.getMillis()));
-      }
+            Timestamp.valueOf(literal.getValueAs(TimeString.class).toString()));
+      case TIMESTAMP:
+        return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo,
+            Timestamp.valueOf(literal.getValueAs(TimestampString.class).toString()));
       case BINARY:
         return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, literal.getValue3());
       case DECIMAL:
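
Note on the two translator changes above (ASTBuilder and ExprNodeConverter): temporal RexLiterals are no longer unpacked as java.util.Calendar and reformatted through Joda-Time; they are read via literal.getValueAs(...) as Calcite's DateString/TimeString/TimestampString, whose toString() forms ("yyyy-MM-dd" and "yyyy-MM-dd HH:mm:ss[.fff]") can be quoted straight into the Hive AST or parsed back with java.sql.Date.valueOf / Timestamp.valueOf. A minimal sketch of that string round-trip using the Calcite util classes directly (no real RexLiteral; the class name and values are made up):

    import java.sql.Date;
    import java.sql.Timestamp;

    import org.apache.calcite.util.DateString;
    import org.apache.calcite.util.TimestampString;

    public class CalciteTemporalStringSketch {
      public static void main(String[] args) {
        DateString ds = new DateString("2017-07-13");
        TimestampString tss = new TimestampString("2017-07-13 18:20:52").withNanos(123000000);

        // ASTBuilder-style: quote the string form into a Hive literal token.
        System.out.println("'" + ds.toString() + "'  '" + tss.toString() + "'");

        // ExprNodeConverter-style: parse the same strings back into java.sql values.
        Date d = Date.valueOf(ds.toString());
        Timestamp t = Timestamp.valueOf(tss.toString());
        System.out.println(d + "  " + t);
      }
    }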

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index 52ca3b0..7665f56 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -28,7 +28,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 
-import org.apache.calcite.avatica.util.DateTimeUtils;
 import org.apache.calcite.avatica.util.TimeUnit;
 import org.apache.calcite.avatica.util.TimeUnitRange;
 import org.apache.calcite.plan.RelOptCluster;
@@ -50,7 +49,9 @@ import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.util.ConversionUtil;
+import org.apache.calcite.util.DateString;
 import org.apache.calcite.util.NlsString;
+import org.apache.calcite.util.TimestampString;
 import org.apache.hadoop.hive.common.type.Decimal128;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
@@ -99,8 +100,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectIn
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.joda.time.DateTime;
-import org.joda.time.DateTimeZone;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableList.Builder;
@@ -659,40 +658,29 @@ public class RexNodeConverter {
       calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
       break;
     case DATE:
-        // The Calcite literal is in GMT, this will be converted back to JVM locale 
-        // by ASTBuilder.literal during Calcite->Hive plan conversion
-        final Calendar cal = Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
-        cal.setTime((Date) value);
-        calciteLiteral = rexBuilder.makeDateLiteral(cal);
-        break;
-      case TIMESTAMP:
-        // The Calcite literal is in GMT, this will be converted back to JVM locale 
-        // by ASTBuilder.literal during Calcite->Hive plan conversion
-        final Calendar calt = Calendar.getInstance(DateTimeUtils.GMT_ZONE, Locale.getDefault());
-        if (value instanceof Calendar) {
-          final Calendar c = (Calendar) value;
-          long timeMs = c.getTimeInMillis();
-          calt.setTimeInMillis(timeMs);
-        } else {
-          final Timestamp ts = (Timestamp) value;
-          // CALCITE-1690
-          // Calcite cannot represent TIMESTAMP literals with precision higher than 3
-          if (ts.getNanos() % 1000000 != 0) {
-            throw new CalciteSemanticException(
-              "High Precision Timestamp: " + String.valueOf(ts),
-              UnsupportedFeature.HighPrecissionTimestamp);
-          }
-          calt.setTimeInMillis(ts.getTime());
-        }
-        // Must call makeLiteral, not makeTimestampLiteral 
-        // to have the RexBuilder.roundTime logic kick in
-        calciteLiteral = rexBuilder.makeLiteral(
-          calt,
-          rexBuilder.getTypeFactory().createSqlType(
-            SqlTypeName.TIMESTAMP,
-            rexBuilder.getTypeFactory().getTypeSystem().getDefaultPrecision(SqlTypeName.TIMESTAMP)),
-          false);
-        break;
+      final Calendar cal = Calendar.getInstance(Locale.getDefault());
+      cal.setTime((Date) value);
+      calciteLiteral = rexBuilder.makeDateLiteral(DateString.fromCalendarFields(cal));
+      break;
+    case TIMESTAMP:
+      final TimestampString tsString;
+      if (value instanceof Calendar) {
+        tsString = TimestampString.fromCalendarFields((Calendar) value);
+      } else {
+        final Timestamp ts = (Timestamp) value;
+        final Calendar calt = Calendar.getInstance(Locale.getDefault());
+        calt.setTimeInMillis(ts.getTime());
+        tsString = TimestampString.fromCalendarFields(calt).withNanos(ts.getNanos());
+      }
+      // Must call makeLiteral, not makeTimestampLiteral
+      // to have the RexBuilder.roundTime logic kick in
+      calciteLiteral = rexBuilder.makeLiteral(
+        tsString,
+        rexBuilder.getTypeFactory().createSqlType(
+          SqlTypeName.TIMESTAMP,
+          rexBuilder.getTypeFactory().getTypeSystem().getDefaultPrecision(SqlTypeName.TIMESTAMP)),
+        false);
+      break;
     case INTERVAL_YEAR_MONTH:
       // Calcite year-month literal value is months as BigDecimal
       BigDecimal totalMonths = BigDecimal.valueOf(((HiveIntervalYearMonth) value).getTotalMonths());
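
Note on the RexNodeConverter change above (the Hive-constant-to-Calcite direction): the GMT Calendar plumbing is replaced by DateString.fromCalendarFields and TimestampString.fromCalendarFields, and the old CalciteSemanticException for timestamps with sub-millisecond precision is gone, since withNanos(ts.getNanos()) carries the full nanosecond value into the Calcite literal string. A small sketch of just that conversion step, outside RexNodeConverter (the class name is made up):

    import java.sql.Timestamp;
    import java.util.Calendar;
    import java.util.Locale;

    import org.apache.calcite.util.TimestampString;

    public class TimestampToCalciteSketch {
      public static void main(String[] args) {
        Timestamp ts = Timestamp.valueOf("2017-07-13 18:20:52.123456789");

        // Calendar fields only carry millisecond precision ...
        Calendar cal = Calendar.getInstance(Locale.getDefault());
        cal.setTimeInMillis(ts.getTime());

        // ... so withNanos() re-attaches the full fractional part before the value
        // is handed to RexBuilder.makeLiteral(...).
        TimestampString tsString =
            TimestampString.fromCalendarFields(cal).withNanos(ts.getNanos());

        System.out.println(tsString);  // e.g. 2017-07-13 18:20:52.123456789
      }
    }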

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 88054e7..97bdee0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -172,7 +172,6 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateJoinTransp
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateProjectMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregatePullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateReduceRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveDruidProjectFilterTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExceptRewriteRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExpandDistinctAggregatesRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterAggregateTransposeRule;
@@ -212,7 +211,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSubQueryRemoveRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionPullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveWindowingFixRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewFilterScanRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTBuilder;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverter;
@@ -1488,7 +1487,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
             planner.addMaterialization(materialization);
           }
           // Add view-based rewriting rules to planner
-          planner.addRule(HiveMaterializedViewFilterScanRule.INSTANCE);
+          planner.addRule(HiveMaterializedViewRule.INSTANCE_PROJECT_FILTER);
+          planner.addRule(HiveMaterializedViewRule.INSTANCE_FILTER);
           // Optimize plan
           planner.setRoot(calciteOptimizedPlan);
           calciteOptimizedPlan = planner.findBestExp();
@@ -1544,7 +1544,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null,
               HepMatchOrder.BOTTOM_UP,
               DruidRules.FILTER,
-              HiveDruidProjectFilterTransposeRule.INSTANCE,
+              DruidRules.PROJECT_FILTER_TRANSPOSE,
               DruidRules.AGGREGATE_FILTER_TRANSPOSE,
               DruidRules.AGGREGATE_PROJECT,
               DruidRules.PROJECT,
@@ -3451,7 +3451,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
 
         w = cluster.getRexBuilder().makeOver(calciteAggFnRetType, calciteAggFn, calciteAggFnArgs,
             partitionKeys, ImmutableList.<RexFieldCollation> copyOf(orderKeys), lowerBound,
-            upperBound, isRows, true, false);
+            upperBound, isRows, true, false, hiveAggInfo.m_distinct);
       } else {
         // TODO: Convert to Semantic Exception
         throw new RuntimeException("Unsupported window Spec");

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/queries/clientpositive/materialized_view_create.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create.q b/ql/src/test/queries/clientpositive/materialized_view_create.q
index 57f8bb7..bb50dbb 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create.q
@@ -29,3 +29,9 @@ create materialized view cmv_mat_view5 tblproperties ('key'='value') as select a
 select * from cmv_mat_view5;
 
 show tblproperties cmv_mat_view5;
+
+drop materialized view cmv_mat_view;
+drop materialized view cmv_mat_view2;
+drop materialized view cmv_mat_view3;
+drop materialized view cmv_mat_view4;
+drop materialized view cmv_mat_view5;

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
index e95a868..b17517f 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q
@@ -57,3 +57,5 @@ select * from (
   join
   (select a, c from cmv_basetable where d = 3) table2
   on table1.a = table2.a);
+
+drop materialized view cmv_mat_view;

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join12.q.out b/ql/src/test/results/clientpositive/auto_join12.q.out
index 2fdd39f..0e9fa98 100644
--- a/ql/src/test/results/clientpositive/auto_join12.q.out
+++ b/ql/src/test/results/clientpositive/auto_join12.q.out
@@ -41,12 +41,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
@@ -57,12 +57,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)
@@ -76,12 +76,12 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -91,11 +91,11 @@ STAGE PLANS:
                     1 _col0 (type: string)
                     2 _col0 (type: string)
                   outputColumnNames: _col0, _col3
-                  Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: hash(_col0,_col3) (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col0)
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join16.q.out b/ql/src/test/results/clientpositive/auto_join16.q.out
index c4d654a..f05de41 100644
--- a/ql/src/test/results/clientpositive/auto_join16.q.out
+++ b/ql/src/test/results/clientpositive/auto_join16.q.out
@@ -32,12 +32,12 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string), _col1 (type: string)
@@ -50,12 +50,12 @@ STAGE PLANS:
             alias: tab
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -63,11 +63,11 @@ STAGE PLANS:
                     0 _col0 (type: string), _col1 (type: string)
                     1 _col0 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col3
-                  Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: hash(_col0,_col3) (type: int)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_col0)
                       mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join4.q.out b/ql/src/test/results/clientpositive/auto_join4.q.out
index d4fb977..6c0fccd 100644
--- a/ql/src/test/results/clientpositive/auto_join4.q.out
+++ b/ql/src/test/results/clientpositive/auto_join4.q.out
@@ -55,12 +55,12 @@ STAGE PLANS:
             alias: src2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join5.q.out b/ql/src/test/results/clientpositive/auto_join5.q.out
index c3b562d..4103061 100644
--- a/ql/src/test/results/clientpositive/auto_join5.q.out
+++ b/ql/src/test/results/clientpositive/auto_join5.q.out
@@ -55,12 +55,12 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join8.q.out b/ql/src/test/results/clientpositive/auto_join8.q.out
index 5ca6798..2689578 100644
--- a/ql/src/test/results/clientpositive/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/auto_join8.q.out
@@ -55,12 +55,12 @@ STAGE PLANS:
             alias: src2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
                     0 _col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
index 041621f..35e9a5d 100644
--- a/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
+++ b/ql/src/test/results/clientpositive/beeline/materialized_view_create_rewrite.q.out
@@ -320,3 +320,11 @@ POSTHOOK: Input: default@cmv_basetable
 #### A masked pattern was here ####
 3	9.80	3	978.76
 3	978.76	3	978.76
+PREHOOK: query: drop materialized view cmv_mat_view
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view
+PREHOOK: Output: default@cmv_mat_view
+POSTHOOK: query: drop materialized view cmv_mat_view
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view
+POSTHOOK: Output: default@cmv_mat_view

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
index a2f5dbf..afc4fd9 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out
@@ -715,12 +715,12 @@ STAGE PLANS:
             alias: subq2:subq1:a
             Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((key < 8) and (key < 6)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: (key < 6) (type: boolean)
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
                 outputColumnNames: key
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -832,12 +832,12 @@ STAGE PLANS:
             alias: subq2:subq1:a
             Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((key < 8) and (key < 6)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: (key < 6) (type: boolean)
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
                 outputColumnNames: key
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1
@@ -1393,12 +1393,12 @@ STAGE PLANS:
             alias: a:subq2:subq1:a
             Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((key < 8) and (key < 6)) (type: boolean)
-              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+              predicate: (key < 6) (type: boolean)
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: key (type: int)
                 outputColumnNames: key
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Sorted Merge Bucket Map Join Operator
                   condition map:
                        Inner Join 0 to 1

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
index 6d6a38a..a4c7f77 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
@@ -30,18 +30,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: key, value
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: key (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: value (type: string)
                   auto parallelism: false
@@ -51,18 +51,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: key, value
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: key (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: value (type: string)
                   auto parallelism: false
@@ -224,17 +224,17 @@ STAGE PLANS:
             0 key (type: string)
             1 key (type: string)
           outputColumnNames: key, value, key0, value0
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
             outputColumnNames: key, value, key1, value1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -328,18 +328,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: key, value
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: key (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: value (type: string)
                   auto parallelism: false
@@ -349,18 +349,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: key, value
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: key (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: value (type: string)
                   auto parallelism: false
@@ -522,17 +522,17 @@ STAGE PLANS:
             0 key (type: string)
             1 key (type: string)
           outputColumnNames: key, value, key0, value0
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
             outputColumnNames: key, value, key1, value1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/druid_basic2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out
index 38b07be..2e9c340 100644
--- a/ql/src/test/results/clientpositive/druid_basic2.q.out
+++ b/ql/src/test/results/clientpositive/druid_basic2.q.out
@@ -166,7 +166,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":["robot"],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"filter":{"type":"selector","dimension":"language","value":"en"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
             druid.query.type groupBy
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           GatherStats: false
@@ -554,12 +554,12 @@ STAGE PLANS:
           TableScan
             alias: druid_table_1
             properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
               druid.query.type groupBy
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             GatherStats: false
             Select Operator
-              expressions: robot (type: string), __time (type: timestamp), $f3 (type: float), $f4 (type: float), UDFToInteger(robot) (type: int)
+              expressions: robot (type: string), floor_day (type: timestamp), $f3 (type: float), $f4 (type: float), UDFToInteger(robot) (type: int)
               outputColumnNames: _col0, _col1, _col2, _col3, _col5
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Reduce Output Operator
@@ -589,7 +589,7 @@ STAGE PLANS:
               columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
               columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
               druid.datasource wikipedia
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
               druid.query.type groupBy
 #### A masked pattern was here ####
               name default.druid_table_1
@@ -615,7 +615,7 @@ STAGE PLANS:
                 columns.comments 'from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer','from deserializer'
                 columns.types timestamp:string:string:string:string:string:string:string:string:float:float:float:float:float
                 druid.datasource wikipedia
-                druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"day","dimensions":["robot","language"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+                druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"language"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"delta"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
                 druid.query.type groupBy
 #### A masked pattern was here ####
                 name default.druid_table_1
@@ -832,16 +832,16 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: druid_table_1
-            filterExpr: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
+            filterExpr: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
             properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"none","dimensions":["robot"],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default"},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
               druid.query.type groupBy
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: floor_day(__time) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
+              predicate: floor_day(extract) BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: robot (type: string), __time (type: timestamp)
+                expressions: robot (type: string), extract (type: timestamp)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
@@ -889,83 +889,24 @@ ORDER BY robot
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: __time BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: __time BETWEEN 1999-11-01 00:00:00.0 AND 1999-11-10 00:00:00.0 (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: robot (type: string), floor_day(__time) (type: timestamp)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: timestamp)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: timestamp)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp)
-                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    TopN Hash Memory Usage: 0.1
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: timestamp)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              TopN Hash Memory Usage: 0.1
-              value expressions: _col1 (type: timestamp)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: timestamp)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: 10
+      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_day","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"day","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"}]},"aggregations":[{"type":"longSum","name":"dummy_agg","fieldName":"dummy_agg"}],"intervals":["1999-11-01T00:00:00.000/1999-11-10T00:00:00.001"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), floor_day (type: timestamp)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
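
A note on the plan change above: with the upgraded Calcite, the GROUP BY robot, floor_day(`__time`) ... ORDER BY robot LIMIT 10 query no longer needs the two MapReduce stages; the aggregation, ordering and limit all travel inside the single Druid groupBy query, and floor_day is evaluated by the timeFormat extraction function at granularity "day". The standalone sketch below (hypothetical class and method names, plain java.time, not Druid's or Hive's actual code) shows roughly what that extraction yields for a row's __time value:

    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;
    import java.time.temporal.ChronoUnit;

    // Illustration only: approximates the "timeFormat" extraction with
    // granularity "day" that backs the floor_day dimension above.
    public class FloorDaySketch {
      private static final DateTimeFormatter FMT =
          DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'");

      static String floorDay(ZonedDateTime t) {
        // Truncate to the start of the UTC day, then render with the same
        // format string that appears in the extractionFn of the plan.
        return FMT.format(t.withZoneSameInstant(ZoneOffset.UTC).truncatedTo(ChronoUnit.DAYS));
      }

      public static void main(String[] args) {
        System.out.println(floorDay(ZonedDateTime.parse("1999-11-04T17:30:12.345Z")));
        // prints 1999-11-04T00:00:00.000Z
      }
    }
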
 
 PREHOOK: query: EXPLAIN EXTENDED
 SELECT robot, floor_day(`__time`), max(added) as m, sum(delta) as s

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/druid_intervals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out
index 90013c6..433f15e 100644
--- a/ql/src/test/results/clientpositive/druid_intervals.q.out
+++ b/ql/src/test/results/clientpositive/druid_intervals.q.out
@@ -109,7 +109,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T08:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/2012-03-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -139,7 +139,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2012-03-01T08:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-03-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -171,7 +171,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T08:00:00.000/2011-01-01T08:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -191,40 +191,24 @@ FROM druid_table_1
 WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: __time BETWEEN 2010-01-01 00:00:00.0 AND 2011-01-01 00:00:00.0 (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: __time BETWEEN 2010-01-01 00:00:00.0 AND 2011-01-01 00:00:00.0 (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: __time (type: timestamp)
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `__time`
@@ -249,7 +233,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":[],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2011-01-01T00:00:00.001","2012-01-01T00:00:00.000/2013-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -281,7 +265,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":[],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2012-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
             druid.query.type select
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -301,40 +285,24 @@ FROM druid_table_1
 WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: (__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: (__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: __time (type: timestamp)
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
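
The rewritten plans above show that BETWEEN and IN predicates on __time are now folded directly into the Druid "intervals" list instead of being applied by a Hive-side Filter Operator: an inclusive upper bound becomes an exclusive end one millisecond later, and each value of an IN list becomes a one-millisecond point interval. A minimal, self-contained sketch of that endpoint arithmetic (hypothetical helper names, java.time only, not the actual Calcite/Hive translation code):

    import java.time.LocalDateTime;
    import java.time.format.DateTimeFormatter;

    // Illustration only: reproduces the interval strings seen in the plans above.
    public class DruidIntervalSketch {
      private static final DateTimeFormatter ISO_MILLIS =
          DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS");

      /** Inclusive [lower, upper] range -> end-exclusive Druid interval "start/end". */
      static String betweenToInterval(LocalDateTime lower, LocalDateTime upperInclusive) {
        // Druid intervals are end-exclusive, so bump the inclusive bound by 1 ms.
        LocalDateTime endExclusive = upperInclusive.plusNanos(1_000_000);
        return ISO_MILLIS.format(lower) + "/" + ISO_MILLIS.format(endExclusive);
      }

      /** A single timestamp from an IN list -> a 1 ms point interval. */
      static String pointToInterval(LocalDateTime ts) {
        return betweenToInterval(ts, ts);
      }

      public static void main(String[] args) {
        System.out.println(betweenToInterval(
            LocalDateTime.parse("2010-01-01T00:00:00"),
            LocalDateTime.parse("2011-01-01T00:00:00")));
        // prints 2010-01-01T00:00:00.000/2011-01-01T00:00:00.001
        System.out.println(pointToInterval(LocalDateTime.parse("2010-01-01T00:00:00")));
        // prints 2010-01-01T00:00:00.000/2010-01-01T00:00:00.001
      }
    }

The printed strings match the intervals in the two plans above; the actual translation happens during Calcite planning, not in helpers like these.
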
 
 PREHOOK: query: EXPLAIN
 SELECT `__time`, robot
@@ -347,40 +315,24 @@ FROM druid_table_1
 WHERE robot = 'user1' AND `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00')
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            filterExpr: ((__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) and (robot = 'user1')) (type: boolean)
-            properties:
-              druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"dimensions":["robot"],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
-              druid.query.type select
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Filter Operator
-              predicate: ((__time) IN (2010-01-01 00:00:00.0, 2011-01-01 00:00:00.0) and (robot = 'user1')) (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: __time (type: timestamp), 'user1' (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"select","dataSource":"wikipedia","descending":false,"intervals":["2010-01-01T00:00:00.000/2010-01-01T00:00:00.001","2011-01-01T00:00:00.000/2011-01-01T00:00:00.001"],"filter":{"type":"selector","dimension":"robot","value":"user1"},"dimensions":[],"metrics":[],"granularity":"all","pagingSpec":{"threshold":16384,"fromNext":true},"context":{"druid.query.fetch":false}}
+            druid.query.type select
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: __time (type: timestamp), 'user1' (type: string)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT `__time`, robot

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/druid_timeseries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out
index bd4d6de..0ce1abe 100644
--- a/ql/src/test/results/clientpositive/druid_timeseries.q.out
+++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out
@@ -109,11 +109,11 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"timeseries","dataSource":"wikipedia","descending":false,"granularity":"none","aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"],"context":{"skipEmptyBuckets":true}}
-            druid.query.type timeseries
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}}],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.type groupBy
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: __time (type: timestamp), $f1 (type: float), $f2 (type: float)
+            expressions: extract (type: timestamp), $f1 (type: float), $f2 (type: float)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             ListSink
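
The change above replaces a Druid timeseries query at granularity "none" with a groupBy keyed on the "extract" time dimension, i.e. the raw __time value rendered by the timeFormat extraction, so rows are grouped per distinct timestamp and $f1 (max of added) and $f2 (sum of variation) are computed per group. A rough analogy in plain Java streams (the Row type and the sample values are made up for illustration):

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;
    import java.util.stream.Collectors;

    // Rough analogy only: grouping by the full timestamp is what the rewritten
    // groupBy query with the "extract" dimension computes.
    public class ExtractGroupBySketch {
      static final class Row {
        final String time; final double added; final double variation;
        Row(String time, double added, double variation) {
          this.time = time; this.added = added; this.variation = variation;
        }
      }

      public static void main(String[] args) {
        List<Row> rows = Arrays.asList(
            new Row("1999-11-04T00:00:11.000Z", 10.0, 1.0),
            new Row("1999-11-04T00:00:11.000Z", 20.0, -2.0),
            new Row("1999-11-04T00:00:12.000Z", 5.0, 3.0));
        // $f1 = doubleMax(added) per distinct __time value
        Map<String, Double> maxAdded = rows.stream().collect(Collectors.groupingBy(
            r -> r.time, TreeMap::new, Collectors.reducing(0.0, r -> r.added, Double::max)));
        // $f2 = doubleSum(variation) per distinct __time value
        Map<String, Double> sumVariation = rows.stream().collect(Collectors.groupingBy(
            r -> r.time, TreeMap::new, Collectors.summingDouble(r -> r.variation)));
        System.out.println(maxAdded);      // {...00:11.000Z=20.0, ...00:12.000Z=5.0}
        System.out.println(sumVariation);  // {...00:11.000Z=-1.0, ...00:12.000Z=3.0}
      }
    }
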


[09/10] hive git commit: HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jc...@apache.org.
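
The hunk below deletes ql's local copy of SubstitutionVisitor; its own javadoc carries the note "TODO: Remove when we upgrade to Calcite version using builders", and the HIVE-16888 upgrade to Calcite 1.13 is the point at which that fork goes away. The class's splitFilter helper is documented further down as computing the residue in condition = target AND residue, returning TRUE when the two are equivalent and null when the condition is weaker than the target. A toy restatement of just the conjunctive cases, with plain strings standing in for Calcite RexNode conjuncts (it deliberately ignores the OR-splitting path that the real method also handles):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    // Toy illustration of the splitFilter() contract described in the javadoc
    // of the deleted class; not the Calcite implementation.
    public class SplitFilterSketch {
      /** Returns the residue such that condition = target AND residue,
       *  an empty list standing for TRUE, or null when there is no match. */
      static List<String> splitFilter(List<String> condition, List<String> target) {
        if (!condition.containsAll(target)) {
          return null;                    // condition is weaker than target: no match
        }
        Set<String> residue = new LinkedHashSet<>(condition);
        residue.removeAll(target);        // keep only what target does not already enforce
        return new ArrayList<>(residue);
      }

      public static void main(String[] args) {
        // condition: x = 1 AND y = 2, target: x = 1   ->  residue: [y = 2]
        System.out.println(splitFilter(Arrays.asList("x = 1", "y = 2"), Arrays.asList("x = 1")));
        // condition: x = 1, target: x = 1 AND y = 2   ->  null (no match)
        System.out.println(splitFilter(Arrays.asList("x = 1"), Arrays.asList("x = 1", "y = 2")));
      }
    }

This is the property that lets a query be rewritten over a materialized view: the view supplies the target, and whatever is left over (the residue) stays as a filter on top of the view scan.
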
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
deleted file mode 100644
index 93dcc0e..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java
+++ /dev/null
@@ -1,2458 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views;
-
-import static org.apache.calcite.rex.RexUtil.andNot;
-import static org.apache.calcite.rex.RexUtil.removeAll;
-import static org.apache.calcite.rex.RexUtil.simplify;
-
-import java.util.AbstractList;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-
-import org.apache.calcite.avatica.util.Spaces;
-import org.apache.calcite.linq4j.Ord;
-import org.apache.calcite.plan.RelOptCluster;
-import org.apache.calcite.plan.RelOptRule;
-import org.apache.calcite.plan.RelOptRuleCall;
-import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.plan.RexImplicationChecker;
-import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.SingleRel;
-import org.apache.calcite.rel.core.Aggregate;
-import org.apache.calcite.rel.core.AggregateCall;
-import org.apache.calcite.rel.core.CorrelationId;
-import org.apache.calcite.rel.core.Filter;
-import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.JoinRelType;
-import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.core.Sort;
-import org.apache.calcite.rel.core.TableScan;
-import org.apache.calcite.rel.core.Values;
-import org.apache.calcite.rel.logical.LogicalAggregate;
-import org.apache.calcite.rel.logical.LogicalJoin;
-import org.apache.calcite.rel.logical.LogicalSort;
-import org.apache.calcite.rel.logical.LogicalUnion;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeField;
-import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.rex.RexCall;
-import org.apache.calcite.rex.RexExecutorImpl;
-import org.apache.calcite.rex.RexInputRef;
-import org.apache.calcite.rex.RexLiteral;
-import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexShuttle;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.sql.SqlAggFunction;
-import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.sql.validate.SqlValidatorUtil;
-import org.apache.calcite.tools.RelBuilder;
-import org.apache.calcite.tools.RelBuilderFactory;
-import org.apache.calcite.util.Bug;
-import org.apache.calcite.util.ControlFlowException;
-import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.Litmus;
-import org.apache.calcite.util.Pair;
-import org.apache.calcite.util.Util;
-import org.apache.calcite.util.mapping.Mapping;
-import org.apache.calcite.util.mapping.Mappings;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Equivalence;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.LinkedHashMultimap;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
-
-/**
- * Substitutes part of a tree of relational expressions with another tree.
- *
- * <p>The call {@code new SubstitutionVisitor(target, query).go(replacement))}
- * will return {@code query} with every occurrence of {@code target} replaced
- * by {@code replacement}.</p>
- *
- * <p>The following example shows how {@code SubstitutionVisitor} can be used
- * for materialized view recognition.</p>
- *
- * <ul>
- * <li>query = SELECT a, c FROM t WHERE x = 5 AND b = 4</li>
- * <li>target = SELECT a, b, c FROM t WHERE x = 5</li>
- * <li>replacement = SELECT * FROM mv</li>
- * <li>result = SELECT a, c FROM mv WHERE b = 4</li>
- * </ul>
- *
- * <p>Note that {@code result} uses the materialized view table {@code mv} and a
- * simplified condition {@code b = 4}.</p>
- *
- * <p>Uses a bottom-up matching algorithm. Nodes do not need to be identical.
- * At each level, returns the residue.</p>
- *
- * <p>The inputs must only include the core relational operators:
- * {@link org.apache.calcite.rel.logical.LogicalTableScan},
- * {@link org.apache.calcite.rel.logical.LogicalFilter},
- * {@link org.apache.calcite.rel.logical.LogicalProject},
- * {@link org.apache.calcite.rel.logical.LogicalJoin},
- * {@link org.apache.calcite.rel.logical.LogicalUnion},
- * {@link org.apache.calcite.rel.logical.LogicalAggregate}.</p>
- *
- * TODO: Remove when we upgrade to Calcite version using builders.
- */
-public class SubstitutionVisitor {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(SubstitutionVisitor.class);
-
-  /** Equivalence that compares objects by their {@link Object#toString()}
-   * method. */
-  private static final Equivalence<Object> STRING_EQUIVALENCE =
-      new Equivalence<Object>() {
-        @Override protected boolean doEquivalent(Object o, Object o2) {
-          return o.toString().equals(o2.toString());
-        }
-
-        @Override protected int doHash(Object o) {
-          return o.toString().hashCode();
-        }
-      };
-
-  /** Equivalence that compares {@link Lists}s by the
-   * {@link Object#toString()} of their elements. */
-  @SuppressWarnings("unchecked")
-  private static final Equivalence<List<?>> PAIRWISE_STRING_EQUIVALENCE =
-      (Equivalence) STRING_EQUIVALENCE.pairwise();
-
-  protected static final ImmutableList<UnifyRule> DEFAULT_RULES =
-      ImmutableList.<UnifyRule>of(
-          TrivialRule.INSTANCE,
-          ScanToProjectUnifyRule.INSTANCE,
-          ProjectToProjectUnifyRule.INSTANCE,
-          FilterToProjectUnifyRule.INSTANCE,
-//          ProjectToFilterUnifyRule.INSTANCE,
-//          FilterToFilterUnifyRule.INSTANCE,
-          AggregateToAggregateUnifyRule.INSTANCE,
-          AggregateOnProjectToAggregateUnifyRule.INSTANCE);
-
-  /**
-   * Factory for a builder for relational expressions.
-   * <p>The actual builder is available via {@link RelOptRuleCall#builder()}.
-   */
-  protected final RelBuilder relBuilder;
-
-  private final ImmutableList<UnifyRule> rules;
-  private final Map<Pair<Class, Class>, List<UnifyRule>> ruleMap =
-      new HashMap<>();
-  private final RelOptCluster cluster;
-  private final Holder query;
-  private final MutableRel target;
-
-  /**
-   * Nodes in {@link #target} that have no children.
-   */
-  final List<MutableRel> targetLeaves;
-
-  /**
-   * Nodes in {@link #query} that have no children.
-   */
-  final List<MutableRel> queryLeaves;
-
-  final Map<MutableRel, MutableRel> replacementMap = new HashMap<>();
-
-  final Multimap<MutableRel, MutableRel> equivalents =
-      LinkedHashMultimap.create();
-
-  /** Workspace while rule is being matched.
-   * Careful, re-entrant!
-   * Assumes no rule needs more than 2 slots. */
-  protected final MutableRel[] slots = new MutableRel[2];
-
-  /** Creates a SubstitutionVisitor with the default rule set. */
-  public SubstitutionVisitor(RelNode target_, RelNode query_) {
-    this(target_, query_, DEFAULT_RULES);
-  }
-
-  public SubstitutionVisitor(RelNode target_, RelNode query_,
-          ImmutableList<UnifyRule> rules) {
-    this(target_, query_, rules, RelFactories.LOGICAL_BUILDER);
-  }
-
-  /** Creates a SubstitutionVisitor. */
-  public SubstitutionVisitor(RelNode target_, RelNode query_,
-      ImmutableList<UnifyRule> rules, RelBuilderFactory relBuilderFactory) {
-    this.cluster = target_.getCluster();
-    this.rules = rules;
-    this.query = Holder.of(toMutable(query_));
-    this.target = toMutable(target_);
-    this.relBuilder = relBuilderFactory.create(cluster, null);
-    final Set<MutableRel> parents = Sets.newIdentityHashSet();
-    final List<MutableRel> allNodes = new ArrayList<>();
-    final MutableRelVisitor visitor =
-        new MutableRelVisitor() {
-          public void visit(MutableRel node) {
-            parents.add(node.parent);
-            allNodes.add(node);
-            super.visit(node);
-          }
-        };
-    visitor.go(target);
-
-    // Populate the list of leaves in the tree under "target".
-    // Leaves are all nodes that are not parents.
-    // For determinism, it is important that the list is in scan order.
-    allNodes.removeAll(parents);
-    targetLeaves = ImmutableList.copyOf(allNodes);
-
-    allNodes.clear();
-    parents.clear();
-    visitor.go(query);
-    allNodes.removeAll(parents);
-    queryLeaves = ImmutableList.copyOf(allNodes);
-  }
-
-  private static MutableRel toMutable(RelNode rel) {
-    if (rel instanceof TableScan) {
-      return MutableScan.of((TableScan) rel);
-    }
-    if (rel instanceof Values) {
-      return MutableValues.of((Values) rel);
-    }
-    if (rel instanceof Project) {
-      final Project project = (Project) rel;
-      final MutableRel input = toMutable(project.getInput());
-      return MutableProject.of(input, project.getProjects(),
-          project.getRowType().getFieldNames());
-    }
-    if (rel instanceof Filter) {
-      final Filter filter = (Filter) rel;
-      final MutableRel input = toMutable(filter.getInput());
-      return MutableFilter.of(input, filter.getCondition());
-    }
-    if (rel instanceof Aggregate) {
-      final Aggregate aggregate = (Aggregate) rel;
-      final MutableRel input = toMutable(aggregate.getInput());
-      return MutableAggregate.of(input, aggregate.indicator,
-          aggregate.getGroupSet(), aggregate.getGroupSets(),
-          aggregate.getAggCallList());
-    }
-    if (rel instanceof Join) {
-      final Join join = (Join) rel;
-      final MutableRel left = toMutable(join.getLeft());
-      final MutableRel right = toMutable(join.getRight());
-      return MutableJoin.of(join.getCluster(), left, right,
-          join.getCondition(), join.getJoinType(), join.getVariablesSet());
-    }
-    if (rel instanceof Sort) {
-      final Sort sort = (Sort) rel;
-      final MutableRel input = toMutable(sort.getInput());
-      return MutableSort.of(input, sort.getCollation(), sort.offset, sort.fetch);
-    }
-    throw new RuntimeException("cannot translate " + rel + " to MutableRel");
-  }
-
-  void register(MutableRel result, MutableRel query) {
-  }
-
-  /**
-   * Maps a condition onto a target.
-   *
-   * <p>If condition is stronger than target, returns the residue.
-   * If it is equal to target, returns the expression that evaluates to
-   * the constant {@code true}. If it is weaker than target, returns
-   * {@code null}.</p>
-   *
-   * <p>The terms satisfy the relation</p>
-   *
-   * <pre>
-   *     {@code condition = target AND residue}
-   * </pre>
-   *
-   * <p>and {@code residue} must be as weak as possible.</p>
-   *
-   * <p>Example #1: condition stronger than target</p>
-   * <ul>
-   * <li>condition: x = 1 AND y = 2</li>
-   * <li>target: x = 1</li>
-   * <li>residue: y = 2</li>
-   * </ul>
-   *
-   * <p>Note that residue {@code x &gt; 0 AND y = 2} would also satisfy the
-   * relation {@code condition = target AND residue} but is stronger than
-   * necessary, so we prefer {@code y = 2}.</p>
-   *
-   * <p>Example #2: target weaker than condition (valid, but not currently
-   * implemented)</p>
-   * <ul>
-   * <li>condition: x = 1</li>
-   * <li>target: x = 1 OR z = 3</li>
-   * <li>residue: NOT (z = 3)</li>
-   * </ul>
-   *
-   * <p>Example #3: condition and target are equivalent</p>
-   * <ul>
-   * <li>condition: x = 1 AND y = 2</li>
-   * <li>target: y = 2 AND x = 1</li>
-   * <li>residue: TRUE</li>
-   * </ul>
-   *
-   * <p>Example #4: condition weaker than target</p>
-   * <ul>
-   * <li>condition: x = 1</li>
-   * <li>target: x = 1 AND y = 2</li>
-   * <li>residue: null (i.e. no match)</li>
-   * </ul>
-   *
-   * <p>There are many other possible examples. It amounts to solving
-   * whether {@code condition AND NOT target} can ever evaluate to
-   * true, and therefore is a form of the NP-complete
-   * <a href="http://en.wikipedia.org/wiki/Satisfiability">Satisfiability</a>
-   * problem.</p>
-   */
-  @VisibleForTesting
-  public static RexNode splitFilter(
-      final RexBuilder rexBuilder, RexNode condition, RexNode target) {
-    // First, try splitting into ORs.
-    // Given target    c1 OR c2 OR c3 OR c4
-    // and condition   c2 OR c4
-    // residue is      NOT c1 AND NOT c3
-    // Also deals with case target [x] condition [x] yields residue [true].
-    RexNode z = splitOr(rexBuilder, condition, target);
-    if (z != null) {
-      return z;
-    }
-
-    RexNode x = andNot(rexBuilder, target, condition);
-    if (mayBeSatisfiable(x)) {
-      RexNode x2 = andNot(rexBuilder, condition, target);
-      return simplify(rexBuilder, x2);
-    }
-    return null;
-  }
-
-  private static RexNode splitOr(
-      final RexBuilder rexBuilder, RexNode condition, RexNode target) {
-    List<RexNode> targets = RelOptUtil.disjunctions(target);
-    for (RexNode e : RelOptUtil.disjunctions(condition)) {
-      boolean found = removeAll(targets, e);
-      if (!found) {
-        return null;
-      }
-    }
-    return RexUtil.composeConjunction(rexBuilder,
-        Lists.transform(targets, RexUtil.notFn(rexBuilder)), false);
-  }
-
-  /**
-   * Returns whether a boolean expression ever returns true.
-   *
-   * <p>This method may give false positives. For instance, it will say
-   * that {@code x = 5 AND x > 10} is satisfiable, because at present it
-   * cannot prove that it is not.</p>
-   */
-  public static boolean mayBeSatisfiable(RexNode e) {
-    // Example:
-    //  e: x = 1 AND y = 2 AND z = 3 AND NOT (x = 1 AND y = 2)
-    //  disjunctions: {x = 1, y = 2, z = 3}
-    //  notDisjunctions: {x = 1 AND y = 2}
-    final List<RexNode> disjunctions = new ArrayList<>();
-    final List<RexNode> notDisjunctions = new ArrayList<>();
-    RelOptUtil.decomposeConjunction(e, disjunctions, notDisjunctions);
-
-    // If there is a single FALSE or NOT TRUE, the whole expression is
-    // always false.
-    for (RexNode disjunction : disjunctions) {
-      switch (disjunction.getKind()) {
-      case LITERAL:
-        if (!RexLiteral.booleanValue(disjunction)) {
-          return false;
-        }
-      }
-    }
-    for (RexNode disjunction : notDisjunctions) {
-      switch (disjunction.getKind()) {
-      case LITERAL:
-        if (RexLiteral.booleanValue(disjunction)) {
-          return false;
-        }
-      }
-    }
-    // If one of the not-disjunctions is a disjunction that is wholly
-    // contained in the disjunctions list, the expression is not
-    // satisfiable.
-    //
-    // Example #1. x AND y AND z AND NOT (x AND y)  - not satisfiable
-    // Example #2. x AND y AND NOT (x AND y)        - not satisfiable
-    // Example #3. x AND y AND NOT (x AND y AND z)  - may be satisfiable
-    for (RexNode notDisjunction : notDisjunctions) {
-      final List<RexNode> disjunctions2 =
-          RelOptUtil.conjunctions(notDisjunction);
-      if (disjunctions.containsAll(disjunctions2)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  public RelNode go0(RelNode replacement_) {
-    assert false; // not called
-    MutableRel replacement = toMutable(replacement_);
-    assert MutableRels.equalType(
-        "target", target, "replacement", replacement, Litmus.THROW);
-    replacementMap.put(target, replacement);
-    final UnifyResult unifyResult = matchRecurse(target);
-    if (unifyResult == null) {
-      return null;
-    }
-    final MutableRel node0 = unifyResult.result;
-    MutableRel node = node0; // replaceAncestors(node0);
-    if (LOGGER.isDebugEnabled()) {
-      LOGGER.debug("Convert: query:\n"
-          + query.deep()
-          + "\nunify.query:\n"
-          + unifyResult.call.query.deep()
-          + "\nunify.result:\n"
-          + unifyResult.result.deep()
-          + "\nunify.target:\n"
-          + unifyResult.call.target.deep()
-          + "\nnode0:\n"
-          + node0.deep()
-          + "\nnode:\n"
-          + node.deep());
-    }
-    return fromMutable(node);
-  }
-
-  /**
-   * Returns a list of all possible rels that result from substituting the
-   * matched RelNode with the replacement RelNode within the query.
-   *
-   * <p>For example, the substitution result of A join B, while A and B
-   * are both a qualified match for replacement R, is R join B, R join R,
-   * A join R.
-   */
-  public List<RelNode> go(RelNode replacement_) {
-    List<List<Replacement>> matches = go(toMutable(replacement_));
-    if (matches.isEmpty()) {
-      return ImmutableList.of();
-    }
-    List<RelNode> sub = Lists.newArrayList();
-    sub.add(fromMutable(query.input));
-    reverseSubstitute(query, matches, sub, 0, matches.size());
-    return sub;
-  }
-
-  /**
-   * Substitutes the query with replacement whenever possible but meanwhile
-   * keeps track of all the substitutions and their original rel before
-   * replacement, so that in later processing stage, the replacement can be
-   * recovered individually to produce a list of all possible rels with
-   * substitution in different places.
-   */
-  private List<List<Replacement>> go(MutableRel replacement) {
-    assert MutableRels.equalType(
-        "target", target, "replacement", replacement, Litmus.THROW);
-    final List<MutableRel> queryDescendants = MutableRels.descendants(query);
-    final List<MutableRel> targetDescendants = MutableRels.descendants(target);
-
-    // Populate "equivalents" with (q, t) for each query descendant q and
-    // target descendant t that are equal.
-    final Map<MutableRel, MutableRel> map = Maps.newHashMap();
-    for (MutableRel queryDescendant : queryDescendants) {
-      map.put(queryDescendant, queryDescendant);
-    }
-    for (MutableRel targetDescendant : targetDescendants) {
-      MutableRel queryDescendant = map.get(targetDescendant);
-      if (queryDescendant != null) {
-        assert queryDescendant.rowType.equals(targetDescendant.rowType);
-        equivalents.put(queryDescendant, targetDescendant);
-      }
-    }
-    map.clear();
-
-    final List<Replacement> attempted = Lists.newArrayList();
-    List<List<Replacement>> substitutions = Lists.newArrayList();
-
-    for (;;) {
-      int count = 0;
-      MutableRel queryDescendant = query;
-    outer:
-      while (queryDescendant != null) {
-        for (Replacement r : attempted) {
-          if (queryDescendant == r.after) {
-            // This node has been replaced by previous iterations in the
-            // hope to match its ancestors, so the node itself should not
-            // be matched again.
-            queryDescendant = MutableRels.preOrderTraverseNext(queryDescendant);
-            continue outer;
-          }
-        }
-        final MutableRel next = MutableRels.preOrderTraverseNext(queryDescendant);
-        final MutableRel childOrNext =
-            queryDescendant.getInputs().isEmpty()
-                ? next : queryDescendant.getInputs().get(0);
-        for (MutableRel targetDescendant : targetDescendants) {
-          for (UnifyRule rule
-              : applicableRules(queryDescendant, targetDescendant)) {
-            UnifyRuleCall call =
-                rule.match(this, queryDescendant, targetDescendant);
-            if (call != null) {
-              final UnifyResult result = rule.apply(call);
-              if (result != null) {
-                ++count;
-                attempted.add(new Replacement(result.call.query, result.result));
-                MutableRel parent = result.call.query.replaceInParent(result.result);
-
-                // Replace previous equivalents with new equivalents, higher up
-                // the tree.
-                for (int i = 0; i < rule.slotCount; i++) {
-                  Collection<MutableRel> equi = equivalents.get(slots[i]);
-                  if (!equi.isEmpty()) {
-                    equivalents.remove(slots[i], equi.iterator().next());
-                  }
-                }
-                assert result.result.rowType.equals(result.call.query.rowType)
-                    : Pair.of(result.result, result.call.query);
-                equivalents.put(result.result, result.call.query);
-                if (targetDescendant == target) {
-                  // A real substitution happens. We purge the attempted
-                  // replacement list and add them into substitution list.
-                  // Meanwhile we stop matching the descendants and jump
-                  // to the next subtree in pre-order traversal.
-                  if (!target.equals(replacement)) {
-                    Replacement r = MutableRels.replace(
-                        query.input, target, copyMutable(replacement));
-                    assert r != null
-                        : rule + " should have returned a result containing the target.";
-                    attempted.add(r);
-                  }
-                  substitutions.add(ImmutableList.copyOf(attempted));
-                  attempted.clear();
-                  queryDescendant = next;
-                  continue outer;
-                }
-                // We will try walking the query tree all over again to see
-                // if there can be any substitutions after the replacement
-                // attempt.
-                break outer;
-              }
-            }
-          }
-        }
-        queryDescendant = childOrNext;
-      }
-      // Quit the entire loop if:
-      // 1) we have walked the entire query tree with one or more successful
-      //    substitutions, thus count != 0 && attempted.isEmpty();
-      // 2) we have walked the entire query tree but have made no replacement
-      //    attempt, thus count == 0 && attempted.isEmpty();
-      // 3) we had done some replacement attempt in a previous walk, but in
-      //    this one we have not found any potential matches or substitutions,
-      //    thus count == 0 && !attempted.isEmpty().
-      if (count == 0 || attempted.isEmpty()) {
-        break;
-      }
-    }
-    if (!attempted.isEmpty()) {
-      // We had done some replacement attempt in the previous walk, but that
-      // did not lead to any substitutions in this walk, so we need to recover
-      // the replacement.
-      undoReplacement(attempted);
-    }
-    return substitutions;
-  }
-
-  /**
-   * Represents a replacement action: before &rarr; after.
-   */
-  private static class Replacement {
-    final MutableRel before;
-    final MutableRel after;
-
-    Replacement(MutableRel before, MutableRel after) {
-      this.before = before;
-      this.after = after;
-    }
-  }
-
-  private static void undoReplacement(List<Replacement> replacement) {
-    for (int i = replacement.size() - 1; i >= 0; i--) {
-      Replacement r = replacement.get(i);
-      r.after.replaceInParent(r.before);
-    }
-  }
-
-  private static void redoReplacement(List<Replacement> replacement) {
-    for (Replacement r : replacement) {
-      r.before.replaceInParent(r.after);
-    }
-  }
-
-  private void reverseSubstitute(Holder query,
-      List<List<Replacement>> matches, List<RelNode> sub,
-      int replaceCount, int maxCount) {
-    if (matches.isEmpty()) {
-      return;
-    }
-    final List<List<Replacement>> rem = matches.subList(1, matches.size());
-    reverseSubstitute(query, rem, sub, replaceCount, maxCount);
-    undoReplacement(matches.get(0));
-    if (++replaceCount < maxCount) {
-      sub.add(fromMutable(query.input));
-    }
-    reverseSubstitute(query, rem, sub, replaceCount, maxCount);
-    redoReplacement(matches.get(0));
-  }
-
-  private List<RelNode> fromMutables(List<MutableRel> nodes) {
-    return Lists.transform(nodes,
-        new Function<MutableRel, RelNode>() {
-          public RelNode apply(MutableRel mutableRel) {
-            return fromMutable(mutableRel);
-          }
-        });
-  }
-
-  private RelNode fromMutable(MutableRel node) {
-    switch (node.type) {
-    case SCAN:
-    case VALUES:
-      return ((MutableLeafRel) node).rel;
-    case PROJECT:
-      final MutableProject project = (MutableProject) node;
-      relBuilder.push(fromMutable(project.input));
-      relBuilder.project(project.projects);
-      return relBuilder.build();
-    case FILTER:
-      final MutableFilter filter = (MutableFilter) node;
-      relBuilder.push(fromMutable(filter.input));
-      relBuilder.filter(filter.condition);
-      return relBuilder.build();
-    case AGGREGATE:
-      final MutableAggregate aggregate = (MutableAggregate) node;
-      return LogicalAggregate.create(fromMutable(aggregate.input),
-          aggregate.indicator, aggregate.groupSet, aggregate.groupSets,
-          aggregate.aggCalls);
-    case SORT:
-      final MutableSort sort = (MutableSort) node;
-      return LogicalSort.create(fromMutable(sort.input), sort.collation,
-          sort.offset, sort.fetch);
-    case UNION:
-      final MutableUnion union = (MutableUnion) node;
-      return LogicalUnion.create(fromMutables(union.inputs), union.all);
-    case JOIN:
-      final MutableJoin join = (MutableJoin) node;
-      return LogicalJoin.create(fromMutable(join.getLeft()), fromMutable(join.getRight()),
-          join.getCondition(), join.getVariablesSet(), join.getJoinType());
-    default:
-      throw new AssertionError(node.deep());
-    }
-  }
-
-  private static List<MutableRel> copyMutables(List<MutableRel> nodes) {
-    return Lists.transform(nodes,
-        new Function<MutableRel, MutableRel>() {
-          public MutableRel apply(MutableRel mutableRel) {
-            return copyMutable(mutableRel);
-          }
-        });
-  }
-
-  private static MutableRel copyMutable(MutableRel node) {
-    switch (node.type) {
-    case SCAN:
-      return MutableScan.of((TableScan) ((MutableScan) node).rel);
-    case VALUES:
-      return MutableValues.of((Values) ((MutableValues) node).rel);
-    case PROJECT:
-      final MutableProject project = (MutableProject) node;
-      return MutableProject.of(project.rowType,
-          copyMutable(project.input), project.projects);
-    case FILTER:
-      final MutableFilter filter = (MutableFilter) node;
-      return MutableFilter.of(copyMutable(filter.input), filter.condition);
-    case AGGREGATE:
-      final MutableAggregate aggregate = (MutableAggregate) node;
-      return MutableAggregate.of(copyMutable(aggregate.input),
-          aggregate.indicator, aggregate.groupSet, aggregate.groupSets,
-          aggregate.aggCalls);
-    case SORT:
-      final MutableSort sort = (MutableSort) node;
-      return MutableSort.of(copyMutable(sort.input), sort.collation,
-          sort.offset, sort.fetch);
-    case UNION:
-      final MutableUnion union = (MutableUnion) node;
-      return MutableUnion.of(copyMutables(union.inputs), union.all);
-    case JOIN:
-      final MutableJoin join = (MutableJoin) node;
-      return MutableJoin.of(join.cluster, copyMutable(join.getLeft()),
-          copyMutable(join.getRight()), join.getCondition(), join.getJoinType(),
-          join.getVariablesSet());
-    default:
-      throw new AssertionError(node.deep());
-    }
-  }
-
-  private UnifyResult matchRecurse(MutableRel target) {
-    assert false; // not called
-    final List<MutableRel> targetInputs = target.getInputs();
-    MutableRel queryParent = null;
-
-    for (MutableRel targetInput : targetInputs) {
-      UnifyResult unifyResult = matchRecurse(targetInput);
-      if (unifyResult == null) {
-        return null;
-      }
-      queryParent = unifyResult.call.query.replaceInParent(unifyResult.result);
-    }
-
-    if (targetInputs.isEmpty()) {
-      for (MutableRel queryLeaf : queryLeaves) {
-        for (UnifyRule rule : applicableRules(queryLeaf, target)) {
-          final UnifyResult x = apply(rule, queryLeaf, target);
-          if (x != null) {
-            if (LOGGER.isDebugEnabled()) {
-              LOGGER.debug("Rule: " + rule
-                  + "\nQuery:\n"
-                  + queryParent
-                  + (x.call.query != queryParent
-                     ? "\nQuery (original):\n"
-                     + queryParent
-                     : "")
-                  + "\nTarget:\n"
-                  + target.deep()
-                  + "\nResult:\n"
-                  + x.result.deep()
-                  + "\n");
-            }
-            return x;
-          }
-        }
-      }
-    } else {
-      assert queryParent != null;
-      for (UnifyRule rule : applicableRules(queryParent, target)) {
-        final UnifyResult x = apply(rule, queryParent, target);
-        if (x != null) {
-          if (LOGGER.isDebugEnabled()) {
-            LOGGER.debug(
-                "Rule: " + rule
-                + "\nQuery:\n"
-                + queryParent.deep()
-                + (x.call.query != queryParent
-                   ? "\nQuery (original):\n"
-                   + queryParent.toString()
-                   : "")
-                + "\nTarget:\n"
-                + target.deep()
-                + "\nResult:\n"
-                + x.result.deep()
-                + "\n");
-          }
-          return x;
-        }
-      }
-    }
-    if (LOGGER.isDebugEnabled()) {
-      LOGGER.debug(
-          "Unify failed:"
-          + "\nQuery:\n"
-          + queryParent.toString()
-          + "\nTarget:\n"
-          + target.toString()
-          + "\n");
-    }
-    return null;
-  }
-
-  private UnifyResult apply(UnifyRule rule, MutableRel query,
-      MutableRel target) {
-    final UnifyRuleCall call = new UnifyRuleCall(rule, query, target, null);
-    return rule.apply(call);
-  }
-
-  private List<UnifyRule> applicableRules(MutableRel query,
-      MutableRel target) {
-    final Class queryClass = query.getClass();
-    final Class targetClass = target.getClass();
-    final Pair<Class, Class> key = Pair.of(queryClass, targetClass);
-    List<UnifyRule> list = ruleMap.get(key);
-    if (list == null) {
-      final ImmutableList.Builder<UnifyRule> builder =
-          ImmutableList.builder();
-      for (UnifyRule rule : rules) {
-        //noinspection unchecked
-        if (mightMatch(rule, queryClass, targetClass)) {
-          builder.add(rule);
-        }
-      }
-      list = builder.build();
-      ruleMap.put(key, list);
-    }
-    return list;
-  }
-
-  private static boolean mightMatch(UnifyRule rule,
-      Class queryClass, Class targetClass) {
-    return rule.queryOperand.clazz.isAssignableFrom(queryClass)
-        && rule.targetOperand.clazz.isAssignableFrom(targetClass);
-  }
-
-  /** Exception thrown to exit a matcher. Not really an error. */
-  protected static class MatchFailed extends ControlFlowException {
-    @SuppressWarnings("ThrowableInstanceNeverThrown")
-    public static final MatchFailed INSTANCE = new MatchFailed();
-  }
-
-  /** Rule that attempts to match a query relational expression
-   * against a target relational expression.
-   *
-   * <p>The rule declares the query and target types; this allows the
-   * engine to fire only a few rules in a given context.</p>
-   */
-  protected abstract static class UnifyRule {
-    protected final int slotCount;
-    protected final Operand queryOperand;
-    protected final Operand targetOperand;
-
-    protected UnifyRule(int slotCount, Operand queryOperand,
-        Operand targetOperand) {
-      this.slotCount = slotCount;
-      this.queryOperand = queryOperand;
-      this.targetOperand = targetOperand;
-    }
-
-    /**
-     * <p>Applies this rule to a particular node in a query. The goal is
-     * to convert {@code query} into {@code target}. Before the rule is
-     * invoked, Calcite has made sure that query's children are equivalent
-     * to target's children.
-     *
-     * <p>There are 3 possible outcomes:</p>
-     *
-     * <ul>
-     *
-     * <li>{@code query} already exactly matches {@code target}; returns
-     * {@code target}</li>
-     *
-     * <li>{@code query} is sufficiently close to a match for
-     * {@code target}; returns {@code target}</li>
-     *
-     * <li>{@code query} cannot be made to match {@code target}; returns
-     * null</li>
-     *
-     * </ul>
-     *
-     * <p>REVIEW: Is it possible that we match query PLUS one or more of its
-     * ancestors?</p>
-     *
-     * @param call Input parameters
-     */
-    protected abstract UnifyResult apply(UnifyRuleCall call);
-
-    protected UnifyRuleCall match(SubstitutionVisitor visitor, MutableRel query,
-        MutableRel target) {
-      if (queryOperand.matches(visitor, query)) {
-        if (targetOperand.matches(visitor, target)) {
-          return visitor.new UnifyRuleCall(this, query, target,
-              copy(visitor.slots, slotCount));
-        }
-      }
-      return null;
-    }
-
-    protected <E> ImmutableList<E> copy(E[] slots, int slotCount) {
-      // Optimize if there are 0 or 1 slots.
-      switch (slotCount) {
-      case 0:
-        return ImmutableList.of();
-      case 1:
-        return ImmutableList.of(slots[0]);
-      default:
-        return ImmutableList.copyOf(slots).subList(0, slotCount);
-      }
-    }
-  }
-
-  /**
-   * Arguments to an application of a {@link UnifyRule}.
-   */
-  protected class UnifyRuleCall {
-    protected final UnifyRule rule;
-    public final MutableRel query;
-    public final MutableRel target;
-    protected final ImmutableList<MutableRel> slots;
-
-    public UnifyRuleCall(UnifyRule rule, MutableRel query, MutableRel target,
-        ImmutableList<MutableRel> slots) {
-      this.rule = Preconditions.checkNotNull(rule);
-      this.query = Preconditions.checkNotNull(query);
-      this.target = Preconditions.checkNotNull(target);
-      this.slots = Preconditions.checkNotNull(slots);
-    }
-
-    public UnifyResult result(MutableRel result) {
-      assert MutableRels.contains(result, target);
-      assert MutableRels.equalType("result", result, "query", query,
-          Litmus.THROW);
-      MutableRel replace = replacementMap.get(target);
-      if (replace != null) {
-        assert false; // replacementMap is always empty
-        // result =
-        MutableRels.replace(result, target, replace);
-      }
-      register(result, query);
-      return new UnifyResult(this, result);
-    }
-
-    /**
-     * Creates a {@link UnifyRuleCall} based on the parent of {@code query}.
-     */
-    public UnifyRuleCall create(MutableRel query) {
-      return new UnifyRuleCall(rule, query, target, slots);
-    }
-
-    public RelOptCluster getCluster() {
-      return cluster;
-    }
-  }
-
-  /**
-   * Result of an application of a {@link UnifyRule} indicating that the
-   * rule successfully matched {@code query} against {@code target} and
-   * generated a {@code result} that is equivalent to {@code query} and
-   * contains {@code target}.
-   */
-  protected static class UnifyResult {
-    private final UnifyRuleCall call;
-    // equivalent to "query", contains "result"
-    private final MutableRel result;
-
-    UnifyResult(UnifyRuleCall call, MutableRel result) {
-      this.call = call;
-      assert MutableRels.equalType("query", call.query, "result", result,
-          Litmus.THROW);
-      this.result = result;
-    }
-  }
-
-  /** Abstract base class for implementing {@link UnifyRule}. */
-  protected abstract static class AbstractUnifyRule extends UnifyRule {
-    public AbstractUnifyRule(Operand queryOperand, Operand targetOperand,
-        int slotCount) {
-      super(slotCount, queryOperand, targetOperand);
-      //noinspection AssertWithSideEffects
-      assert isValid();
-    }
-
-    protected boolean isValid() {
-      final SlotCounter slotCounter = new SlotCounter();
-      slotCounter.visit(queryOperand);
-      assert slotCounter.queryCount == slotCount;
-      assert slotCounter.targetCount == 0;
-      slotCounter.queryCount = 0;
-      slotCounter.visit(targetOperand);
-      assert slotCounter.queryCount == 0;
-      assert slotCounter.targetCount == slotCount;
-      return true;
-    }
-
-    /** Creates an operand with given inputs. */
-    protected static Operand operand(Class<? extends MutableRel> clazz,
-        Operand... inputOperands) {
-      return new InternalOperand(clazz, ImmutableList.copyOf(inputOperands));
-    }
-
-    /** Creates an operand that doesn't check inputs. */
-    protected static Operand any(Class<? extends MutableRel> clazz) {
-      return new AnyOperand(clazz);
-    }
-
-    /** Creates an operand that matches a relational expression in the query. */
-    protected static Operand query(int ordinal) {
-      return new QueryOperand(ordinal);
-    }
-
-    /** Creates an operand that matches a relational expression in the
-     * target. */
-    protected static Operand target(int ordinal) {
-      return new TargetOperand(ordinal);
-    }
-  }
-
-  /** Implementation of {@link UnifyRule} that matches if the query is already
-   * equal to the target.
-   *
-   * <p>Matches scans to the same table, because these will be
-   * {@link MutableScan}s with the same
-   * {@link org.apache.calcite.rel.logical.LogicalTableScan} instance.</p>
-   */
-  private static class TrivialRule extends AbstractUnifyRule {
-    private static final TrivialRule INSTANCE = new TrivialRule();
-
-    private TrivialRule() {
-      super(any(MutableRel.class), any(MutableRel.class), 0);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      if (call.query.equals(call.target)) {
-        return call.result(call.query);
-      }
-      return null;
-    }
-  }
-
-  /** Implementation of {@link UnifyRule} that matches
-   * {@link org.apache.calcite.rel.logical.LogicalTableScan}. */
-  private static class ScanToProjectUnifyRule extends AbstractUnifyRule {
-    public static final ScanToProjectUnifyRule INSTANCE =
-        new ScanToProjectUnifyRule();
-
-    private ScanToProjectUnifyRule() {
-      super(any(MutableScan.class),
-          any(MutableProject.class), 0);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      final MutableProject target = (MutableProject) call.target;
-      final MutableScan query = (MutableScan) call.query;
-      // We do not need to check query's parent type to avoid duplication
-      // of ProjectToProjectUnifyRule or FilterToProjectUnifyRule, since
-      // SubstitutionVisitor performs a top-down match.
-      if (!query.equals(target.getInput())) {
-        return null;
-      }
-      final RexShuttle shuttle = getRexShuttle(target);
-      final RexBuilder rexBuilder = target.cluster.getRexBuilder();
-      final List<RexNode> newProjects;
-      try {
-        newProjects = (List<RexNode>)
-            shuttle.apply(rexBuilder.identityProjects(query.getRowType()));
-      } catch (MatchFailed e) {
-        return null;
-      }
-      final MutableProject newProject =
-          MutableProject.of(
-              query.getRowType(), target, newProjects);
-      final MutableRel newProject2 = MutableRels.strip(newProject);
-      return call.result(newProject2);
-    }
-  }
-
-  /** Implementation of {@link UnifyRule} that matches
-   * {@link org.apache.calcite.rel.logical.LogicalProject}. */
-  private static class ProjectToProjectUnifyRule extends AbstractUnifyRule {
-    public static final ProjectToProjectUnifyRule INSTANCE =
-        new ProjectToProjectUnifyRule();
-
-    private ProjectToProjectUnifyRule() {
-      super(operand(MutableProject.class, query(0)),
-          operand(MutableProject.class, target(0)), 1);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      final MutableProject target = (MutableProject) call.target;
-      final MutableProject query = (MutableProject) call.query;
-      final RexShuttle shuttle = getRexShuttle(target);
-      final List<RexNode> newProjects;
-      try {
-        newProjects = shuttle.apply(query.getProjects());
-      } catch (MatchFailed e) {
-        return null;
-      }
-      final MutableProject newProject =
-          MutableProject.of(
-              query.getRowType(), target, newProjects);
-      final MutableRel newProject2 = MutableRels.strip(newProject);
-      return call.result(newProject2);
-    }
-  }
-
-
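As a hedged SQL sketch of the Project-to-Project match above (table t, its columns x and y, and the queries are hypothetical examples, not taken from the patch): getRexShuttle() indexes the target's project expressions by their string form, and each expression projected by the query must map onto one of them, or be rebuilt from column references the target also projects.

  -- query
  SELECT x + y FROM t;
  -- target (think of it as the definition of a materialized view)
  SELECT x, y, x + y AS s FROM t;
  -- result: the query's expression x + y becomes a reference to the target's
  -- third output column
  SELECT s FROM (SELECT x, y, x + y AS s FROM t) mv;

If the query uses an input column that the target does not project, the shuttle throws MatchFailed and the rule returns null.
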
-  /** Implementation of {@link UnifyRule} that matches a {@link MutableFilter}
-   * to a {@link MutableProject}. */
-  private static class FilterToProjectUnifyRule extends AbstractUnifyRule {
-    public static final FilterToProjectUnifyRule INSTANCE =
-        new FilterToProjectUnifyRule();
-
-    private FilterToProjectUnifyRule() {
-      super(operand(MutableFilter.class, query(0)),
-          operand(MutableProject.class, target(0)), 1);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      // Child of projectTarget is equivalent to child of filterQuery.
-      try {
-        // TODO: make sure that constants are ok
-        final MutableProject target = (MutableProject) call.target;
-        final RexShuttle shuttle = getRexShuttle(target);
-        final RexNode newCondition;
-        final MutableFilter query = (MutableFilter) call.query;
-        try {
-          newCondition = query.getCondition().accept(shuttle);
-        } catch (MatchFailed e) {
-          return null;
-        }
-        final MutableFilter newFilter = MutableFilter.of(target, newCondition);
-        if (query.parent instanceof MutableProject) {
-          final MutableRel inverse =
-              invert(((MutableProject) query.parent).getNamedProjects(),
-                  newFilter, shuttle);
-          return call.create(query.parent).result(inverse);
-        } else {
-          final MutableRel inverse = invert(query, newFilter, target);
-          return call.result(inverse);
-        }
-      } catch (MatchFailed e) {
-        return null;
-      }
-    }
-
-    protected MutableRel invert(List<Pair<RexNode, String>> namedProjects,
-        MutableRel input,
-        RexShuttle shuttle) {
-      LOGGER.trace("SubstitutionVisitor: invert:\nprojects: {}\ninput: {}\nproject: {}\n",
-          namedProjects, input, shuttle);
-      final List<RexNode> exprList = new ArrayList<>();
-      final RexBuilder rexBuilder = input.cluster.getRexBuilder();
-      final List<RexNode> projects = Pair.left(namedProjects);
-      for (RexNode expr : projects) {
-        exprList.add(rexBuilder.makeZeroLiteral(expr.getType()));
-      }
-      for (Ord<RexNode> expr : Ord.zip(projects)) {
-        final RexNode node = expr.e.accept(shuttle);
-        if (node == null) {
-          throw MatchFailed.INSTANCE;
-        }
-        exprList.set(expr.i, node);
-      }
-      return MutableProject.of(input, exprList, Pair.right(namedProjects));
-    }
-
-    protected MutableRel invert(MutableRel model, MutableRel input,
-        MutableProject project) {
-      LOGGER.trace("SubstitutionVisitor: invert:\nmodel: {}\ninput: {}\nproject: {}\n",
-          model, input, project);
-      if (project.getProjects().size() < model.getRowType().getFieldCount()) {
-        throw MatchFailed.INSTANCE;
-      }
-      final List<RexNode> exprList = new ArrayList<>();
-      final RexBuilder rexBuilder = model.cluster.getRexBuilder();
-      for (RelDataTypeField field : model.getRowType().getFieldList()) {
-        exprList.add(rexBuilder.makeZeroLiteral(field.getType()));
-      }
-      for (Ord<RexNode> expr : Ord.zip(project.getProjects())) {
-        if (expr.e instanceof RexInputRef) {
-          final int target = ((RexInputRef) expr.e).getIndex();
-          exprList.set(target,
-              rexBuilder.ensureType(expr.e.getType(),
-                  RexInputRef.of(expr.i, input.rowType),
-                  false));
-        } else {
-          throw MatchFailed.INSTANCE;
-        }
-      }
-      return MutableProject.of(model.rowType, input, exprList);
-    }
-  }
-
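As a hedged SQL sketch of the Filter-to-Project case above (table t with exactly the columns x and y is a hypothetical example): the filter's condition is re-expressed against the target's outputs, and invert() then rebuilds the columns the enclosing query needs from the target's projection.

  -- target: projects the columns of t plus a derived expression
  SELECT x, y, x + y AS s FROM t;
  -- query: a Project(x, y) over a Filter(x + y > 10) over t
  SELECT x, y FROM t WHERE x + y > 10;
  -- result
  SELECT x, y FROM (SELECT x, y, x + y AS s FROM t) mv WHERE s > 10;

The rewrite only succeeds when both the condition and the columns required above the filter can be expressed in terms of the target's outputs; otherwise MatchFailed is raised and no result is produced.
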
-  /** Implementation of {@link UnifyRule} that matches a
-   * {@link MutableFilter}. */
-  private static class FilterToFilterUnifyRule extends AbstractUnifyRule {
-    public static final FilterToFilterUnifyRule INSTANCE =
-        new FilterToFilterUnifyRule();
-
-    private FilterToFilterUnifyRule() {
-      super(operand(MutableFilter.class, query(0)),
-          operand(MutableFilter.class, target(0)), 1);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      // in.query can be rewritten in terms of in.target if its condition
-      // is weaker. For example:
-      //   query: SELECT * FROM t WHERE x = 1 AND y = 2
-      //   target: SELECT * FROM t WHERE x = 1
-      // transforms to
-      //   result: SELECT * FROM (target) WHERE y = 2
-      final MutableFilter query = (MutableFilter) call.query;
-      final MutableFilter target = (MutableFilter) call.target;
-      final MutableFilter newFilter =
-          createFilter(query, target);
-      if (newFilter == null) {
-        return null;
-      }
-      return call.result(newFilter);
-    }
-
-    MutableFilter createFilter(MutableFilter query, MutableFilter target) {
-      final RexNode newCondition =
-          splitFilter(query.cluster.getRexBuilder(), query.getCondition(),
-              target.getCondition());
-      if (newCondition == null) {
-        // Could not map query onto target.
-        return null;
-      }
-      if (newCondition.isAlwaysTrue()) {
-        return target;
-      }
-      return MutableFilter.of(target, newCondition);
-    }
-  }
-
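Written out end to end, the example in the comment above looks like this hedged sketch (table t and the predicates are hypothetical); splitFilter() computes the residual condition that still has to be applied on top of the target.

  -- query
  SELECT * FROM t WHERE x = 1 AND y = 2;
  -- target
  SELECT * FROM t WHERE x = 1;
  -- result: the residual y = 2 goes on top of the target
  SELECT * FROM (SELECT * FROM t WHERE x = 1) mv WHERE y = 2;

If the two conditions are identical the residual is TRUE and the target is reused unchanged; if the query's condition is not at least as strong as the target's, splitFilter() returns null and no rewrite is produced.
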
-  /** Implementation of {@link UnifyRule} that matches a {@link MutableProject}
-   * to a {@link MutableFilter}. */
-  private static class ProjectToFilterUnifyRule extends AbstractUnifyRule {
-    public static final ProjectToFilterUnifyRule INSTANCE =
-        new ProjectToFilterUnifyRule();
-
-    private ProjectToFilterUnifyRule() {
-      super(operand(MutableProject.class, query(0)),
-          operand(MutableFilter.class, target(0)), 1);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      if (call.query.parent instanceof MutableFilter) {
-        final UnifyRuleCall in2 = call.create(call.query.parent);
-        final MutableFilter query = (MutableFilter) in2.query;
-        final MutableFilter target = (MutableFilter) in2.target;
-        final MutableFilter newFilter =
-            FilterToFilterUnifyRule.INSTANCE.createFilter(
-                query, target);
-        if (newFilter == null) {
-          return null;
-        }
-        return in2.result(query.replaceInParent(newFilter));
-      }
-      return null;
-    }
-  }
-
-  /** Implementation of {@link UnifyRule} that matches a
-   * {@link org.apache.calcite.rel.logical.LogicalAggregate} to a
-   * {@link org.apache.calcite.rel.logical.LogicalAggregate}, provided
-   * that they have the same child. */
-  private static class AggregateToAggregateUnifyRule extends AbstractUnifyRule {
-    public static final AggregateToAggregateUnifyRule INSTANCE =
-        new AggregateToAggregateUnifyRule();
-
-    private AggregateToAggregateUnifyRule() {
-      super(operand(MutableAggregate.class, query(0)),
-          operand(MutableAggregate.class, target(0)), 1);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      final MutableAggregate query = (MutableAggregate) call.query;
-      final MutableAggregate target = (MutableAggregate) call.target;
-      assert query != target;
-      // in.query can be rewritten in terms of in.target if its groupSet is
-      // a subset, and its aggCalls are a superset. For example:
-      //   query: SELECT x, COUNT(b) FROM t GROUP BY x
-      //   target: SELECT x, y, SUM(a) AS s, COUNT(b) AS cb FROM t GROUP BY x, y
-      // transforms to
-      //   result: SELECT x, SUM(cb) FROM (target) GROUP BY x
-      if (!target.getGroupSet().contains(query.getGroupSet())) {
-        return null;
-      }
-      MutableRel result = unifyAggregates(query, target);
-      if (result == null) {
-        return null;
-      }
-      return call.result(result);
-    }
-  }
-
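A hedged SQL sketch of the two cases handled here and in unifyAggregates() below (table t, its columns and the aggregates are hypothetical examples):

  -- target
  SELECT x, y, SUM(a) AS s, COUNT(b) AS cb FROM t GROUP BY x, y;
  -- query with the same grouping: only a column selection over the target
  SELECT x, y, COUNT(b) FROM t GROUP BY x, y;
  --   rewritten as: SELECT x, y, cb FROM <target>
  -- query with a coarser grouping: a rollup aggregate on top of the target
  SELECT x, COUNT(b) FROM t GROUP BY x;
  --   rewritten as: SELECT x, SUM(cb) FROM <target> GROUP BY x

The rewrite requires the query's group set to be a subset of the target's and every query aggregate to appear among the target's aggregate calls; DISTINCT aggregates, or aggregates with no rollup function, make unifyAggregates() return null.
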
-  public static MutableAggregate permute(MutableAggregate aggregate,
-      MutableRel input, Mapping mapping) {
-    ImmutableBitSet groupSet = Mappings.apply(mapping, aggregate.getGroupSet());
-    ImmutableList<ImmutableBitSet> groupSets =
-        Mappings.apply2(mapping, aggregate.getGroupSets());
-    List<AggregateCall> aggregateCalls =
-        apply(mapping, aggregate.getAggCallList());
-    return MutableAggregate.of(input, aggregate.indicator, groupSet, groupSets,
-        aggregateCalls);
-  }
-
-  private static List<AggregateCall> apply(final Mapping mapping,
-      List<AggregateCall> aggCallList) {
-    return Lists.transform(aggCallList,
-        new Function<AggregateCall, AggregateCall>() {
-          public AggregateCall apply(AggregateCall call) {
-            return call.copy(Mappings.apply2(mapping, call.getArgList()),
-                Mappings.apply(mapping, call.filterArg));
-          }
-        });
-  }
-
-  public static MutableRel unifyAggregates(MutableAggregate query,
-      MutableAggregate target) {
-    if (query.getGroupType() != Aggregate.Group.SIMPLE
-        || target.getGroupType() != Aggregate.Group.SIMPLE) {
-      throw new AssertionError(Bug.CALCITE_461_FIXED);
-    }
-    MutableRel result;
-    if (query.getGroupSet().equals(target.getGroupSet())) {
-      // Same level of aggregation. Generate a project.
-      final List<Integer> projects = Lists.newArrayList();
-      final int groupCount = query.getGroupSet().cardinality();
-      for (int i = 0; i < groupCount; i++) {
-        projects.add(i);
-      }
-      for (AggregateCall aggregateCall : query.getAggCallList()) {
-        int i = target.getAggCallList().indexOf(aggregateCall);
-        if (i < 0) {
-          return null;
-        }
-        projects.add(groupCount + i);
-      }
-      result = MutableRels.createProject(target, projects);
-    } else {
-      // Target is coarser level of aggregation. Generate an aggregate.
-      final ImmutableBitSet.Builder groupSet = ImmutableBitSet.builder();
-      final List<Integer> targetGroupList = target.getGroupSet().asList();
-      for (int c : query.getGroupSet()) {
-        int c2 = targetGroupList.indexOf(c);
-        if (c2 < 0) {
-          return null;
-        }
-        groupSet.set(c2);
-      }
-      final List<AggregateCall> aggregateCalls = Lists.newArrayList();
-      for (AggregateCall aggregateCall : query.getAggCallList()) {
-        if (aggregateCall.isDistinct()) {
-          return null;
-        }
-        int i = target.getAggCallList().indexOf(aggregateCall);
-        if (i < 0) {
-          return null;
-        }
-        aggregateCalls.add(
-            AggregateCall.create(getRollup(aggregateCall.getAggregation()),
-                aggregateCall.isDistinct(),
-                ImmutableList.of(target.groupSet.cardinality() + i), -1,
-                aggregateCall.type, aggregateCall.name));
-      }
-      result = MutableAggregate.of(target, false, groupSet.build(), null,
-          aggregateCalls);
-    }
-    return MutableRels.createCastRel(result, query.getRowType(), true);
-  }
-
-  /** Implementation of {@link UnifyRule} that matches a
-   * {@link MutableAggregate} on
-   * a {@link MutableProject} query to an {@link MutableAggregate} target.
-   *
-   * <p>The rule is necessary when we unify query=Aggregate(x) with
-   * target=Aggregate(x, y). Query will tend to have an extra Project(x) on its
-   * input, which this rule knows is safe to ignore.</p> */
-  private static class AggregateOnProjectToAggregateUnifyRule
-      extends AbstractUnifyRule {
-    public static final AggregateOnProjectToAggregateUnifyRule INSTANCE =
-        new AggregateOnProjectToAggregateUnifyRule();
-
-    private AggregateOnProjectToAggregateUnifyRule() {
-      super(
-          operand(MutableAggregate.class,
-              operand(MutableProject.class, query(0))),
-          operand(MutableAggregate.class, target(0)), 1);
-    }
-
-    public UnifyResult apply(UnifyRuleCall call) {
-      final MutableAggregate query = (MutableAggregate) call.query;
-      final MutableAggregate target = (MutableAggregate) call.target;
-      if (!(query.getInput() instanceof MutableProject)) {
-        return null;
-      }
-      final MutableProject project = (MutableProject) query.getInput();
-      if (project.getInput() != target.getInput()) {
-        return null;
-      }
-      final Mappings.TargetMapping mapping = project.getMapping();
-      if (mapping == null) {
-        return null;
-      }
-      final MutableAggregate aggregate2 =
-          permute(query, project.getInput(), mapping.inverse());
-      final MutableRel result = unifyAggregates(aggregate2, target);
-      return result == null ? null : call.result(result);
-    }
-  }
-
-  public static SqlAggFunction getRollup(SqlAggFunction aggregation) {
-    if (aggregation == SqlStdOperatorTable.SUM
-        || aggregation == SqlStdOperatorTable.MIN
-        || aggregation == SqlStdOperatorTable.MAX
-        || aggregation == SqlStdOperatorTable.SUM0) {
-      return aggregation;
-    } else if (aggregation == SqlStdOperatorTable.COUNT) {
-      return SqlStdOperatorTable.SUM0;
-    } else {
-      return null;
-    }
-  }
-
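The COUNT-to-SUM0 mapping above can be motivated with a small hedged example (the partial counts are made up): partial counts from a finer grouping have to be added, not re-counted.

  -- suppose the target holds partial counts per (x, y):
  --   (x = 1, y = 1, cb = 2)
  --   (x = 1, y = 2, cb = 3)
  -- COUNT(b) for x = 1 is 2 + 3 = 5, i.e. the sum of the partial counts;
  -- re-counting the two partial rows would give 2
  SELECT x, SUM(cb)
  FROM (SELECT x, y, COUNT(b) AS cb FROM t GROUP BY x, y) mv
  GROUP BY x;

$SUM0 is used instead of plain SUM so that an empty input produces 0, matching COUNT; SUM, MIN, MAX and $SUM0 roll up to themselves, and any other aggregate yields null, which aborts the rewrite.
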
-  /** Builds a shuttle that stores a list of expressions, and can map incoming
-   * expressions to references to them. */
-  protected static RexShuttle getRexShuttle(MutableProject target) {
-    final Map<String, Integer> map = new HashMap<>();
-    for (RexNode e : target.getProjects()) {
-      map.put(e.toString(), map.size());
-    }
-    return new RexShuttle() {
-      @Override public RexNode visitInputRef(RexInputRef ref) {
-        final Integer integer = map.get(ref.getName());
-        if (integer != null) {
-          return new RexInputRef(integer, ref.getType());
-        }
-        throw MatchFailed.INSTANCE;
-      }
-
-      @Override public RexNode visitCall(RexCall call) {
-        final Integer integer = map.get(call.toString());
-        if (integer != null) {
-          return new RexInputRef(integer, call.getType());
-        }
-        return super.visitCall(call);
-      }
-    };
-  }
-
-  /** Type of {@code MutableRel}. */
-  private enum MutableRelType {
-    SCAN,
-    PROJECT,
-    FILTER,
-    AGGREGATE,
-    SORT,
-    UNION,
-    JOIN,
-    HOLDER,
-    VALUES
-  }
-
-  /** Visitor over {@link MutableRel}. */
-  private static class MutableRelVisitor {
-    private MutableRel root;
-
-    public void visit(MutableRel node) {
-      node.childrenAccept(this);
-    }
-
-    public MutableRel go(MutableRel p) {
-      this.root = p;
-      visit(p);
-      return root;
-    }
-  }
-
-  /** Mutable equivalent of {@link RelNode}.
-   *
-   * <p>Each node has mutable state, and keeps track of its parent and position
-   * within parent.
-   * It doesn't make sense to canonize {@code MutableRels},
-   * otherwise one node could end up with multiple parents.
-   * It follows that {@code #hashCode} and {@code #equals} are less efficient
-   * than their {@code RelNode} counterparts.
-   * But, you don't need to copy a {@code MutableRel} in order to change it.
-   * For this reason, you should use {@code MutableRel} for short-lived
-   * operations, and transcribe back to {@code RelNode} when you are done.</p>
-   */
-  protected abstract static class MutableRel {
-    MutableRel parent;
-    int ordinalInParent;
-    public final RelOptCluster cluster;
-    final RelDataType rowType;
-    final MutableRelType type;
-
-    private MutableRel(RelOptCluster cluster, RelDataType rowType,
-        MutableRelType type) {
-      this.cluster = cluster;
-      this.rowType = rowType;
-      this.type = type;
-    }
-
-    public RelDataType getRowType() {
-      return rowType;
-    }
-
-    public abstract void setInput(int ordinalInParent, MutableRel input);
-
-    public abstract List<MutableRel> getInputs();
-
-    public abstract void childrenAccept(MutableRelVisitor visitor);
-
-    /** Replaces this {@code MutableRel} in its parent with another node at the
-     * same position.
-     *
-     * <p>Before this method is called, {@code child} must be an orphan (have a
-     * null parent); after it returns, this {@code MutableRel} is an orphan.
-     *
-     * @return The parent
-     */
-    public MutableRel replaceInParent(MutableRel child) {
-      final MutableRel parent = this.parent;
-      if (this != child) {
-/*
-        if (child.parent != null) {
-          child.parent.setInput(child.ordinalInParent, null);
-          child.parent = null;
-        }
-*/
-        if (parent != null) {
-          parent.setInput(ordinalInParent, child);
-          this.parent = null;
-          this.ordinalInParent = 0;
-        }
-      }
-      return parent;
-    }
-
-    public abstract StringBuilder digest(StringBuilder buf);
-
-    public final String deep() {
-      return new MutableRelDumper().apply(this);
-    }
-
-    @Override public final String toString() {
-      return deep();
-    }
-
-    public MutableRel getParent() { return parent; }
-  }
-
-  /** Implementation of {@link MutableRel} whose only purpose is to have a
-   * child. Used as the root of a tree. */
-  private static class Holder extends MutableSingleRel {
-    private Holder(MutableRelType type, RelDataType rowType, MutableRel input) {
-      super(type, rowType, input);
-    }
-
-    static Holder of(MutableRel input) {
-      return new Holder(MutableRelType.HOLDER, input.rowType, input);
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      return buf.append("Holder");
-    }
-  }
-
-  /** Abstract base class for implementations of {@link MutableRel} that have
-   * no inputs. */
-  protected abstract static class MutableLeafRel extends MutableRel {
-    protected final RelNode rel;
-
-    MutableLeafRel(MutableRelType type, RelNode rel) {
-      super(rel.getCluster(), rel.getRowType(), type);
-      this.rel = rel;
-    }
-
-    public void setInput(int ordinalInParent, MutableRel input) {
-      throw new IllegalArgumentException();
-    }
-
-    public List<MutableRel> getInputs() {
-      return ImmutableList.of();
-    }
-
-    public void childrenAccept(MutableRelVisitor visitor) {
-      // no children - nothing to do
-    }
-  }
-
-  /** Mutable equivalent of {@link SingleRel}. */
-  protected abstract static class MutableSingleRel extends MutableRel {
-    protected MutableRel input;
-
-    MutableSingleRel(MutableRelType type, RelDataType rowType,
-        MutableRel input) {
-      super(input.cluster, rowType, type);
-      this.input = input;
-      input.parent = this;
-      input.ordinalInParent = 0;
-    }
-
-    public void setInput(int ordinalInParent, MutableRel input) {
-      if (ordinalInParent >= 1) {
-        throw new IllegalArgumentException();
-      }
-      this.input = input;
-      if (input != null) {
-        input.parent = this;
-        input.ordinalInParent = 0;
-      }
-    }
-
-    public List<MutableRel> getInputs() {
-      return ImmutableList.of(input);
-    }
-
-    public void childrenAccept(MutableRelVisitor visitor) {
-      visitor.visit(input);
-    }
-
-    public MutableRel getInput() {
-      return input;
-    }
-  }
-
-  /** Mutable equivalent of
-   * {@link org.apache.calcite.rel.logical.LogicalTableScan}. */
-  protected static class MutableScan extends MutableLeafRel {
-    private MutableScan(TableScan rel) {
-      super(MutableRelType.SCAN, rel);
-    }
-
-    static MutableScan of(TableScan rel) {
-      return new MutableScan(rel);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof MutableScan
-          && rel.getTable().equals(((MutableScan) obj).rel.getTable());
-    }
-
-    @Override public int hashCode() {
-      return rel.getTable().hashCode();
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      return buf.append("Scan(table: ")
-          .append(rel.getTable().getQualifiedName()).append(")");
-    }
-  }
-
-  /** Mutable equivalent of {@link org.apache.calcite.rel.core.Values}. */
-  protected static class MutableValues extends MutableLeafRel {
-    private MutableValues(Values rel) {
-      super(MutableRelType.VALUES, rel);
-    }
-
-    static MutableValues of(Values rel) {
-      return new MutableValues(rel);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof MutableValues
-          && rel == ((MutableValues) obj).rel;
-    }
-
-    @Override public int hashCode() {
-      return rel.hashCode();
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      return buf.append("Values(tuples: ")
-          .append(((Values) rel).getTuples()).append(")");
-    }
-  }
-
-  /** Mutable equivalent of
-   * {@link org.apache.calcite.rel.logical.LogicalProject}. */
-  protected static class MutableProject extends MutableSingleRel {
-    private final List<RexNode> projects;
-
-    private MutableProject(RelDataType rowType, MutableRel input,
-        List<RexNode> projects) {
-      super(MutableRelType.PROJECT, rowType, input);
-      this.projects = projects;
-      assert RexUtil.compatibleTypes(projects, rowType, Litmus.THROW);
-    }
-
-    public static MutableProject of(RelDataType rowType, MutableRel input,
-        List<RexNode> projects) {
-      return new MutableProject(rowType, input, projects);
-    }
-
-    /** Equivalent to
-     * {@link RelOptUtil#createProject(org.apache.calcite.rel.RelNode, java.util.List, java.util.List)}
-     * for {@link MutableRel}. */
-    public static MutableRel of(MutableRel child, List<RexNode> exprList,
-        List<String> fieldNameList) {
-      final RelDataType rowType =
-          RexUtil.createStructType(child.cluster.getTypeFactory(), exprList,
-              fieldNameList, SqlValidatorUtil.F_SUGGESTER);
-      return of(rowType, child, exprList);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof MutableProject
-          && PAIRWISE_STRING_EQUIVALENCE.equivalent(
-              projects, ((MutableProject) obj).projects)
-          && input.equals(((MutableProject) obj).input);
-    }
-
-    @Override public int hashCode() {
-      return Objects.hash(input,
-          PAIRWISE_STRING_EQUIVALENCE.hash(projects));
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      return buf.append("Project(projects: ").append(projects).append(")");
-    }
-
-    public List<RexNode> getProjects() {
-      return projects;
-    }
-
-    /** Returns a list of (expression, name) pairs. */
-    public final List<Pair<RexNode, String>> getNamedProjects() {
-      return Pair.zip(getProjects(), getRowType().getFieldNames());
-    }
-
-    public Mappings.TargetMapping getMapping() {
-      return Project.getMapping(
-          input.getRowType().getFieldCount(), projects);
-    }
-  }
-
-  /** Mutable equivalent of
-   * {@link org.apache.calcite.rel.logical.LogicalFilter}. */
-  protected static class MutableFilter extends MutableSingleRel {
-    private final RexNode condition;
-
-    private MutableFilter(MutableRel input, RexNode condition) {
-      super(MutableRelType.FILTER, input.rowType, input);
-      this.condition = condition;
-    }
-
-    public static MutableFilter of(MutableRel input, RexNode condition) {
-      return new MutableFilter(input, condition);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof MutableFilter
-          && condition.toString().equals(
-              ((MutableFilter) obj).condition.toString())
-          && input.equals(((MutableFilter) obj).input);
-    }
-
-    @Override public int hashCode() {
-      return Objects.hash(input, condition.toString());
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      return buf.append("Filter(condition: ").append(condition).append(")");
-    }
-
-    public RexNode getCondition() {
-      return condition;
-    }
-  }
-
-  /** Mutable equivalent of
-   * {@link org.apache.calcite.rel.logical.LogicalAggregate}. */
-  protected static class MutableAggregate extends MutableSingleRel {
-    public final boolean indicator;
-    private final ImmutableBitSet groupSet;
-    private final ImmutableList<ImmutableBitSet> groupSets;
-    private final List<AggregateCall> aggCalls;
-
-    private MutableAggregate(MutableRel input, RelDataType rowType,
-        boolean indicator, ImmutableBitSet groupSet,
-        List<ImmutableBitSet> groupSets, List<AggregateCall> aggCalls) {
-      super(MutableRelType.AGGREGATE, rowType, input);
-      this.indicator = indicator;
-      this.groupSet = groupSet;
-      this.groupSets = groupSets == null
-          ? ImmutableList.of(groupSet)
-          : ImmutableList.copyOf(groupSets);
-      this.aggCalls = aggCalls;
-    }
-
-    static MutableAggregate of(MutableRel input, boolean indicator,
-        ImmutableBitSet groupSet, ImmutableList<ImmutableBitSet> groupSets,
-        List<AggregateCall> aggCalls) {
-      RelDataType rowType =
-          Aggregate.deriveRowType(input.cluster.getTypeFactory(),
-              input.getRowType(), indicator, groupSet, groupSets, aggCalls);
-      return new MutableAggregate(input, rowType, indicator, groupSet,
-          groupSets, aggCalls);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof MutableAggregate
-          && groupSet.equals(((MutableAggregate) obj).groupSet)
-          && aggCalls.equals(((MutableAggregate) obj).aggCalls)
-          && input.equals(((MutableAggregate) obj).input);
-    }
-
-    @Override public int hashCode() {
-      return Objects.hash(input, groupSet, aggCalls);
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      return buf.append("Aggregate(groupSet: ").append(groupSet)
-          .append(", groupSets: ").append(groupSets)
-          .append(", calls: ").append(aggCalls).append(")");
-    }
-
-    public ImmutableBitSet getGroupSet() {
-      return groupSet;
-    }
-
-    public ImmutableList<ImmutableBitSet> getGroupSets() {
-      return groupSets;
-    }
-
-    public List<AggregateCall> getAggCallList() {
-      return aggCalls;
-    }
-
-    public Aggregate.Group getGroupType() {
-      return Aggregate.Group.induce(groupSet, groupSets);
-    }
-  }
-
-  /** Mutable equivalent of {@link org.apache.calcite.rel.core.Sort}. */
-  protected static class MutableSort extends MutableSingleRel {
-    private final RelCollation collation;
-    private final RexNode offset;
-    private final RexNode fetch;
-
-    private MutableSort(MutableRel input, RelCollation collation,
-        RexNode offset, RexNode fetch) {
-      super(MutableRelType.SORT, input.rowType, input);
-      this.collation = collation;
-      this.offset = offset;
-      this.fetch = fetch;
-    }
-
-    static MutableSort of(MutableRel input, RelCollation collation,
-        RexNode offset, RexNode fetch) {
-      return new MutableSort(input, collation, offset, fetch);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof MutableSort
-          && collation.equals(((MutableSort) obj).collation)
-          && Objects.equals(offset, ((MutableSort) obj).offset)
-          && Objects.equals(fetch, ((MutableSort) obj).fetch)
-          && input.equals(((MutableSort) obj).input);
-    }
-
-    @Override public int hashCode() {
-      return Objects.hash(input, collation, offset, fetch);
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      buf.append("Sort(collation: ").append(collation);
-      if (offset != null) {
-        buf.append(", offset: ").append(offset);
-      }
-      if (fetch != null) {
-        buf.append(", fetch: ").append(fetch);
-      }
-      return buf.append(")");
-    }
-  }
-
-  /** Base class for set-operations. */
-  protected abstract static class MutableSetOp extends MutableRel {
-    protected final List<MutableRel> inputs;
-
-    private MutableSetOp(RelOptCluster cluster, RelDataType rowType,
-        MutableRelType type, List<MutableRel> inputs) {
-      super(cluster, rowType, type);
-      this.inputs = inputs;
-    }
-
-    @Override public void setInput(int ordinalInParent, MutableRel input) {
-      inputs.set(ordinalInParent, input);
-      if (input != null) {
-        input.parent = this;
-        input.ordinalInParent = ordinalInParent;
-      }
-    }
-
-    @Override public List<MutableRel> getInputs() {
-      return inputs;
-    }
-
-    @Override public void childrenAccept(MutableRelVisitor visitor) {
-      for (MutableRel input : inputs) {
-        visitor.visit(input);
-      }
-    }
-  }
-
-  /** Mutable equivalent of
-   * {@link org.apache.calcite.rel.logical.LogicalUnion}. */
-  protected static class MutableUnion extends MutableSetOp {
-    public boolean all;
-
-    private MutableUnion(RelOptCluster cluster, RelDataType rowType,
-        List<MutableRel> inputs, boolean all) {
-      super(cluster, rowType, MutableRelType.UNION, inputs);
-      this.all = all;
-    }
-
-    static MutableUnion of(List<MutableRel> inputs, boolean all) {
-      assert inputs.size() >= 2;
-      final MutableRel input0 = inputs.get(0);
-      return new MutableUnion(input0.cluster, input0.rowType, inputs, all);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof MutableUnion
-          && inputs.equals(((MutableUnion) obj).getInputs());
-    }
-
-    @Override public int hashCode() {
-      return Objects.hash(type, inputs);
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      return buf.append("Union");
-    }
-  }
-
-  /** Base class for relations with two inputs. */
-  private abstract static class MutableBiRel extends MutableRel {
-    protected MutableRel left;
-    protected MutableRel right;
-
-    MutableBiRel(MutableRelType type, RelOptCluster cluster, RelDataType rowType,
-                        MutableRel left, MutableRel right) {
-      super(cluster, rowType, type);
-      this.left = left;
-      left.parent = this;
-      left.ordinalInParent = 0;
-
-      this.right = right;
-      right.parent = this;
-      right.ordinalInParent = 1;
-    }
-
-    public void setInput(int ordinalInParent, MutableRel input) {
-      if (ordinalInParent > 1) {
-        throw new IllegalArgumentException();
-      }
-      if (ordinalInParent == 0) {
-        this.left = input;
-      } else {
-        this.right = input;
-      }
-      if (input != null) {
-        input.parent = this;
-        input.ordinalInParent = ordinalInParent;
-      }
-    }
-
-    public List<MutableRel> getInputs() {
-      return ImmutableList.of(left, right);
-    }
-
-    public MutableRel getLeft() {
-      return left;
-    }
-
-    public MutableRel getRight() {
-      return right;
-    }
-
-    public void childrenAccept(MutableRelVisitor visitor) {
-
-      visitor.visit(left);
-      visitor.visit(right);
-    }
-  }
-
-  /** Mutable equivalent of
-   * {@link org.apache.calcite.rel.logical.LogicalJoin}. */
-  private static class MutableJoin extends MutableBiRel {
-    //~ Instance fields --------------------------------------------------------
-
-    protected final RexNode condition;
-    protected final ImmutableSet<CorrelationId> variablesSet;
-
-    /**
-     * Values must be of enumeration {@link JoinRelType}, except that
-     * {@link JoinRelType#RIGHT} is disallowed.
-     */
-    protected JoinRelType joinType;
-
-    private MutableJoin(
-        RelDataType rowType,
-        MutableRel left,
-        MutableRel right,
-        RexNode condition,
-        JoinRelType joinType,
-        Set<CorrelationId> variablesSet) {
-      super(MutableRelType.JOIN, left.cluster, rowType, left, right);
-      this.condition = Preconditions.checkNotNull(condition);
-      this.variablesSet = ImmutableSet.copyOf(variablesSet);
-      this.joinType = Preconditions.checkNotNull(joinType);
-    }
-
-    public RexNode getCondition() {
-      return condition;
-    }
-
-    public JoinRelType getJoinType() {
-      return joinType;
-    }
-
-    public ImmutableSet<CorrelationId> getVariablesSet() {
-      return variablesSet;
-    }
-
-    static MutableJoin of(RelOptCluster cluster, MutableRel left,
-        MutableRel right, RexNode condition, JoinRelType joinType,
-        Set<CorrelationId> variablesStopped) {
-      List<RelDataTypeField> fieldList = Collections.emptyList();
-      RelDataType rowType =
-          SqlValidatorUtil.deriveJoinRowType(left.getRowType(),
-              right.getRowType(), joinType, cluster.getTypeFactory(), null,
-              fieldList);
-      return new MutableJoin(rowType, left, right, condition, joinType,
-          variablesStopped);
-    }
-
-    @Override public boolean equals(Object obj) {
-      return obj == this
-          || obj instanceof MutableJoin
-          && joinType == ((MutableJoin) obj).joinType
-          && condition.toString().equals(
-              ((MutableJoin) obj).condition.toString())
-          && left.equals(((MutableJoin) obj).left)
-          && right.equals(((MutableJoin) obj).right);
-    }
-
-    @Override public int hashCode() {
-      return Objects.hash(left, right, condition.toString(), joinType);
-    }
-
-    @Override public StringBuilder digest(StringBuilder buf) {
-      return buf.append("Join(left: ").append(left)
-          .append(", right:").append(right)
-          .append(")");
-    }
-  }
-
-  /** Utilities for dealing with {@link MutableRel}s. */
-  protected static class MutableRels {
-    public static boolean contains(MutableRel ancestor,
-        final MutableRel target) {
-      if (ancestor.equals(target)) {
-        // Short-cut common case.
-        return true;
-      }
-      try {
-        new MutableRelVisitor() {
-          @Override public void visit(MutableRel node) {
-            if (node.equals(target)) {
-              throw Util.FoundOne.NULL;
-            }
-            super.visit(node);
-          }
-          // CHECKSTYLE: IGNORE 1
-        }.go(ancestor);
-        return false;
-      } catch (Util.FoundOne e) {
-        return true;
-      }
-    }
-
-    public static MutableRel preOrderTraverseNext(MutableRel node) {
-      MutableRel parent = node.getParent();
-      int ordinal = node.ordinalInParent + 1;
-      while (parent != null) {
-        if (parent.getInputs().size() > ordinal) {
-          return parent.getInputs().get(ordinal);
-        }
-        node = parent;
-        parent = node.getParent();
-        ordinal = node.ordinalInParent + 1;
-      }
-      return null;
-    }
-
-    private static List<MutableRel> descendants(MutableRel query) {
-      final List<MutableRel> list = new ArrayList<>();
-      descendantsRecurse(list, query);
-      return list;
-    }
-
-    private static void descendantsRecurse(List<MutableRel> list,
-        MutableRel rel) {
-      list.add(rel);
-      for (MutableRel input : rel.getInputs()) {
-        descendantsRecurse(list, input);
-      }
-    }
-
-    /** Returns whether two relational expressions have the same row-type. */
-    public static boolean equalType(String desc0, MutableRel rel0, String desc1,
-        MutableRel rel1, Litmus litmus) {
-      return RelOptUtil.equal(desc0, rel0.getRowType(),
-          desc1, rel1.getRowType(), litmus);
-    }
-
-    /** Within a relational expression {@code query}, replaces occurrences of
-     * {@code find} with {@code replace}.
-     *
-     * <p>Assumes relational expressions (and their descendants) are not null.
-     * Does not handle cycles. */
-    public static Replacement replace(MutableRel query, MutableRel find,
-        MutableRel replace) {
-      if (find.equals(replace)) {
-        // Short-cut common case.
-        return null;
-      }
-      assert equalType("find", find, "replace", replace, Litmus.THROW);
-      return replaceRecurse(query, find, replace);
-    }
-
-    /** Helper for {@link #replace}. */
-    private static Replacement replaceRecurse(MutableRel query,
-        MutableRel find, MutableRel replace) {
-      if (find.equals(query)) {
-        query.replaceInParent(replace);
-        return new Replacement(query, replace);
-      }
-      for (MutableRel input : query.getInputs()) {
-        Replacement r = replaceRecurse(input, find, replace);
-        if (r != null) {
-          return r;
-        }
-      }
-      return null;
-    }
-
-    /** Based on
-     * {@link org.apache.calcite.rel.rules.ProjectRemoveRule#strip}. */
-    public static MutableRel strip(MutableProject project) {
-      return isTrivial(project) ? project.getInput() : project;
-    }
-
-    /** Based on
-     * {@link org.apache.calcite.rel.rules.ProjectRemoveRule#isTrivial(org.apache.calcite.rel.core.Project)}. */
-    public static boolean isTrivial(MutableProject project) {
-      MutableRel child = project.getInput();
-      final RelDataType childRowType = child.getRowType();
-      return RexUtil.isIdentity(project.getProjects(), childRowType);
-    }
-
-    /** Equivalent to
-     * {@link RelOptUtil#createProject(org.apache.calcite.rel.RelNode, java.util.List)}
-     * for {@link MutableRel}. */
-    public static MutableRel createProject(final MutableRel child,
-        final List<Integer> posList) {
-      final RelDataType rowType = child.getRowType();
-      if (Mappings.isIdentity(posList, rowType.getFieldCount())) {
-        return child;
-      }
-      return MutableProject.of(
-          RelOptUtil.permute(child.cluster.getTypeFactory(), rowType,
-              Mappings.bijection(posList)),
-          child,
-          new AbstractList<RexNode>() {
-            public int size() {
-              return posList.size();
-            }
-
-            public RexNode get(int index) {
-              final int pos = posList.get(index);
-              return RexInputRef.of(pos, rowType);
-            }
-          });
-    }
-
-    /** Equivalent to {@link org.apache.calcite.plan.RelOptUtil#createCastRel}
-     * for {@link MutableRel}. */
-    public static MutableRel createCastRel(MutableRel rel,
-        RelDataType castRowType, boolean rename) {
-      RelDataType rowType = rel.getRowType();
-      if (RelOptUtil.areRowTypesEqual(rowType, castRowType, rename)) {
-        // nothing to do
-        return rel;
-      }
-      List<RexNode> castExps =
-          RexUtil.generateCastExpressions(rel.cluster.getRexBuilder(),
-              castRowType, rowType);
-      final List<String> fieldNames =
-          rename ? castRowType.getFieldNames() : rowType.getFieldNames();
-      return MutableProject.of(rel, castExps, fieldNames);
-    }
-  }
-
-  /** Visitor that prints an indented tree of {@link MutableRel}s. */
-  protected static class MutableRelDumper extends MutableRelVisitor {
-    private final StringBuilder buf = new StringBuilder();
-    private int level;
-
-    @Override public void visit(MutableRel node) {
-      Spaces.append(buf, level * 2);
-      if (node == null) {
-        buf.append("null");
-      } else {
-        node.digest(buf);
-        buf.append("\n");
-        ++level;
-        super.visit(node);
-        --level;
-      }
-    }
-
-    public String apply(MutableRel rel) {
-      go(rel);
-      return buf.toString();
-    }
-  }
-
-  /** Returns whether one relational expression is weaker than another. */
-  protected boolean isWeaker(MutableRel rel0, MutableRel rel) {
-    if (rel0 == rel || equivalents.get(rel0).contains(rel)) {
-      return false;
-    }
-
-    if (!(rel0 instanceof MutableFilter)
-        || !(rel instanceof MutableFilter)) {
-      return false;
-    }
-
-    if (!rel.getRowType().equals(rel0.getRowType())) {
-      return false;
-    }
-
-    final MutableRel rel0input = ((MutableFilter) rel0).getInput();
-    final MutableRel relinput = ((MutableFilter) rel).getInput();
-    if (rel0input != relinput
-        && !equivalents.get(rel0input).contains(relinput)) {
-      return false;
-    }
-
-    RexExecutorImpl rexImpl =
-        (RexExecutorImpl) (rel.cluster.getPlanner().getExecutor());
-    RexImplicationChecker rexImplicationChecker = new RexImplicationChecker(
-        rel.cluster.getRexBuilder(),
-        rexImpl, rel.getRowType());
-
-    return rexImplicationChecker.implies(((MutableFilter) rel0).getCondition(),
-        ((MutableFilter) rel).getCondition());
-  }
-
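A hedged SQL sketch of the implication test performed by isWeaker() above (table t and the predicates are hypothetical): the target's filter is "weaker" when the query's condition implies it, so every row the query needs is available from the target and only the query's own condition has to be re-applied.

  -- query-side filter
  SELECT * FROM t WHERE x > 20;
  -- target-side filter: weaker, since x > 20 implies x > 10
  SELECT * FROM t WHERE x > 10;
  -- so the query can be answered from the target
  SELECT * FROM (SELECT * FROM t WHERE x > 10) mv WHERE x > 20;

RexImplicationChecker decides the implication between the two filter conditions; the check also requires both filters to have the same row type and (directly or via the equivalence classes) the same input.
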
-  /** Operand to a {@link UnifyRule}. */
-  protected abstract static class Operand {
-    protected final Class<? extends MutableRel> clazz;
-
-    protected Operand(Class<? extends MutableRel> clazz) {
-      this.clazz = clazz;
-    }
-
-    public abstract boolean matches(SubstitutionVisitor visitor, MutableRel rel);
-
-    public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) {
-      return false;
-    }
-  }
-
-  /** Operand to a {@link UnifyRule} that matches a relational expression of a
-   * given type. It has zero or more child operands. */
-  private static class InternalOperand extends Operand {
-    private final List<Operand> inputs;
-
-    InternalOperand(Class<? extends MutableRel> clazz, List<Operand> inputs) {
-      super(clazz);
-      this.inputs = inputs;
-    }
-
-    @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) {
-      return clazz.isInstance(rel)
-          && allMatch(visitor, inputs, rel.getInputs());
-    }
-
-    @Override public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) {
-      return clazz.isInstance(rel)
-          && allWeaker(visitor, inputs, rel.getInputs());
-    }
-    private static boolean allMatch(SubstitutionVisitor visitor,
-        List<Operand> operands, List<MutableRel> rels) {
-      if (operands.size() != rels.size()) {
-        return false;
-      }
-      for (Pair<Operand, MutableRel> pair : Pair.zip(operands, rels)) {
-        if (!pair.left.matches(visitor, pair.right)) {
-          return false;
-        }
-      }
-      return true;
-    }
-
-    private static boolean allWeaker(
-        SubstitutionVisitor visitor,
-        List<Operand> operands, List<MutableRel> rels) {
-      if (operands.size() != rels.size()) {
-        return false;
-      }
-      for (Pair<Operand, MutableRel> pair : Pair.zip(operands, rels)) {
-        if (!pair.left.isWeaker(visitor, pair.right)) {
-          return false;
-        }
-      }
-      return true;
-    }
-  }
-
-  /** Operand to a {@link UnifyRule} that matches a relational expression of a
-   * given type. */
-  private static class AnyOperand extends Operand {
-    AnyOperand(Class<? extends MutableRel> clazz) {
-      super(clazz);
-    }
-
-    @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) {
-      return clazz.isInstance(rel);
-    }
-  }
-
-  /** Operand that assigns a particular relational expression to a variable.
-   *
-   * <p>It is applied to a descendant of the query, writes that relational
-   * expression into the {@code slots} array, and always matches.
-   * There is a corresponding operand of type {@link TargetOperand} that checks
-   * whether its relational expression, a descendant of the target, is
-   * equivalent to this {@code QueryOperand}'s relational expression.
-   */
-  private static class QueryOperand extends Operand {
-    private final int ordinal;
-
-    protected QueryOperand(int ordinal) {
-      super(MutableRel.class);
-      this.ordinal = ordinal;
-    }
-
-    @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) {
-      visitor.slots[ordinal] = rel;
-      return true;
-    }
-  }
-
-  /** Operand that checks that a relational expression matches the corresponding
-   * relational expression that was passed to a {@link QueryOperand}. */
-  private static class TargetOperand extends Operand {
-    private final int ordinal;
-
-    protected TargetOperand(int ordinal) {
-      super(MutableRel.class);
-      this.ordinal = ordinal;
-    }
-
-    @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) {
-      final MutableRel rel0 = visitor.slots[ordinal];
-      assert rel0 != null : "QueryOperand should have been called first";
-      return rel0 == rel || visitor.equivalents.get(rel0).contains(rel);
-    }
-
-    @Override public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) {
-      final MutableRel rel0 = visitor.slots[ordinal];
-      assert rel0 != null : "QueryOperand should have been called first";
-      return visitor.isWeaker(rel0, rel);
-    }
-  }
-
-  /** Visitor that counts how many {@link QueryOperand} and
-   * {@link TargetOperand} operands there are in an operand tree. */
-  private static class SlotCounter {
-    int queryCount;
-    int targetCount;
-
-    void visit(Operand operand) {
-      if (operand instanceof QueryOperand) {
-        ++queryCount;
-      } else if (operand instanceof TargetOperand) {
-        ++targetCount;
-      } else if (operand instanceof AnyOperand) {
-        // nothing
-      } else {
-        for (Operand input : ((InternalOperand) operand).inputs) {
-          visit(input);
-        }
-      }
-    }
-  }
-
-  /**
-   * Rule that converts a {@link org.apache.calcite.rel.logical.LogicalFilter}
-   * on top of a {@link org.apache.calcite.rel.logical.LogicalProject} into a
-   * trivial filter (on a boolean column).
-   */
-  public static class FilterOnProjectRule extends RelOptRule {
-    private static final Predicate<Filter> PREDICATE =
-        new Predicate<Filter>() {
-          public boolean apply(Filter input) {
-            return input.getCondition() instanceof RexInputRef;
-          }
-        };
-
-    public static final FilterOnProjectRule INSTANCE =
-        new FilterOnProjectRule();
-
-    private FilterOnProjectRule() {
-      super(
-          operand(Filter.class, null, PREDICATE,
-              some(operand(Project.class, any()))));
-    }
-
-    public void onMatch(RelOptRuleCall call) {
-      final Filter filter = call.rel(0);
-      final Project project = call.rel(1);
-
-      final List<RexNode> newProjects = new ArrayList<>(project.getProjects());
-      newProjects.add(filter.getCondition());
-
-      final RelOptCluster cluster = filter.getCluster();
-      RelDataType newRowType =
-          cluster.getTypeFactory().builder()
-              .addAll(project.getRowType().getFieldList())
-              .add("condition", Util.last(newProjects).getType())
-              .build();
-      final RelNode newProject =
-          project.copy(project.getTraitSet(),
-              project.getInput(),
-              newProjects,
-              newRowType);
-
-      final RexInputRef newCondition =
-          cluster.getRexBuilder().makeInputRef(newProject,
-              newProjects.size() - 1);
-
-      call.transformTo(filter.copy(filter.getTraitSet(), newProject, newCondition));
-    }
-  }
-}
-
-// End SubstitutionVisitor.java


[07/10] hive git commit: HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/druid_topn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out
index 5fa6446..052be15 100644
--- a/ql/src/test/results/clientpositive/druid_topn.q.out
+++ b/ql/src/test/results/clientpositive/druid_topn.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
         TableScan
           alias: druid_table_1
           properties:
-            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":["robot"],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f1","direction":"descending"}]},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f1","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f1","fieldName":"added"},{"type":"doubleSum","name":"$f2","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
             druid.query.type groupBy
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
@@ -109,54 +109,24 @@ ORDER BY s DESC
 LIMIT 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"none","dimensions":["robot"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
-              druid.query.type groupBy
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: __time (type: timestamp), robot (type: string), $f2 (type: float), $f3 (type: float)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col3 (type: float)
-                sort order: -
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                TopN Hash Memory Usage: 0.1
-                value expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: float)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: timestamp), VALUE._col1 (type: string), VALUE._col2 (type: float), KEY.reducesinkkey0 (type: float)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 100
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: string), _col0 (type: timestamp), _col2 (type: float), _col3 (type: float)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"extraction","dimension":"__time","outputName":"extract","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","timeZone":"UTC"}},{"type":"default","dimension":"robot"}],"limitSpec":{"type":"default","limit":100,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), extract (type: timestamp), $f2 (type: float), $f3 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT robot, floor_year(`__time`), max(added), sum(variation) as s
@@ -173,50 +143,24 @@ ORDER BY s DESC
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"year","dimensions":["robot"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
-              druid.query.type groupBy
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: robot (type: string), __time (type: timestamp), $f2 (type: float), $f3 (type: float)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col3 (type: float)
-                sort order: -
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                TopN Hash Memory Usage: 0.1
-                value expressions: _col0 (type: string), _col1 (type: timestamp), _col2 (type: float)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), VALUE._col2 (type: float), KEY.reducesinkkey0 (type: float)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: 10
+      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_year","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"year","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), floor_year (type: timestamp), $f2 (type: float), $f3 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT robot, floor_month(`__time`), max(added), sum(variation) as s
@@ -233,50 +177,24 @@ ORDER BY s
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"month","dimensions":["robot"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
-              druid.query.type groupBy
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: robot (type: string), __time (type: timestamp), $f2 (type: float), $f3 (type: float)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col3 (type: float)
-                sort order: +
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                TopN Hash Memory Usage: 0.1
-                value expressions: _col0 (type: string), _col1 (type: timestamp), _col2 (type: float)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: timestamp), VALUE._col2 (type: float), KEY.reducesinkkey0 (type: float)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
-      limit: 10
+      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f3","direction":"ascending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f2","fieldName":"added"},{"type":"doubleSum","name":"$f3","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), floor_month (type: timestamp), $f2 (type: float), $f3 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
@@ -293,54 +211,24 @@ ORDER BY s DESC, m DESC
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"month","dimensions":["robot","namespace"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
-              druid.query.type groupBy
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: robot (type: string), __time (type: timestamp), $f3 (type: float), $f4 (type: float)
-              outputColumnNames: _col0, _col2, _col3, _col4
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col4 (type: float), _col3 (type: float)
-                sort order: --
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                TopN Hash Memory Usage: 0.1
-                value expressions: _col0 (type: string), _col2 (type: timestamp)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col2 (type: timestamp), KEY.reducesinkkey1 (type: float), KEY.reducesinkkey0 (type: float)
-          outputColumnNames: _col0, _col2, _col3, _col4
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col2 (type: timestamp), _col3 (type: float), _col4 (type: float)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"$f4","direction":"descending","dimensionOrder":"numeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), floor_month (type: timestamp), $f3 (type: float), $f4 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s
@@ -357,54 +245,24 @@ ORDER BY robot ASC, m DESC
 LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: druid_table_1
-            properties:
-              druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"month","dimensions":["robot","namespace"],"limitSpec":{"type":"default"},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
-              druid.query.type groupBy
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: robot (type: string), __time (type: timestamp), $f3 (type: float), $f4 (type: float)
-              outputColumnNames: _col0, _col2, _col3, _col4
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col3 (type: float)
-                sort order: +-
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                TopN Hash Memory Usage: 0.1
-                value expressions: _col2 (type: timestamp), _col4 (type: float)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col1 (type: timestamp), KEY.reducesinkkey1 (type: float), VALUE._col2 (type: float)
-          outputColumnNames: _col0, _col2, _col3, _col4
-          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col2 (type: timestamp), _col3 (type: float), _col4 (type: float)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: druid_table_1
+          properties:
+            druid.query.json {"queryType":"groupBy","dataSource":"wikipedia","granularity":"all","dimensions":[{"type":"default","dimension":"robot"},{"type":"default","dimension":"namespace"},{"type":"extraction","dimension":"__time","outputName":"floor_month","extractionFn":{"type":"timeFormat","format":"yyyy-MM-dd'T'HH:mm:ss.SSS'Z'","granularity":"month","timeZone":"UTC","locale":"en-US"}}],"limitSpec":{"type":"default","limit":10,"columns":[{"dimension":"robot","direction":"ascending","dimensionOrder":"alphanumeric"},{"dimension":"$f3","direction":"descending","dimensionOrder":"numeric"}]},"aggregations":[{"type":"doubleMax","name":"$f3","fieldName":"added"},{"type":"doubleSum","name":"$f4","fieldName":"variation"}],"intervals":["1900-01-01T00:00:00.000/3000-01-01T00:00:00.000"]}
+            druid.query.type groupBy
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          Select Operator
+            expressions: robot (type: string), floor_month (type: timestamp), $f3 (type: float), $f4 (type: float)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN
 SELECT robot, floor_year(`__time`), max(added), sum(variation) as s

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/filter_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/filter_union.q.out b/ql/src/test/results/clientpositive/filter_union.q.out
index e0f7ac3..88669da 100644
--- a/ql/src/test/results/clientpositive/filter_union.q.out
+++ b/ql/src/test/results/clientpositive/filter_union.q.out
@@ -26,10 +26,8 @@ where m >2
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-3, Stage-4, Stage-5
+  Stage-2 depends on stages: Stage-1, Stage-3
   Stage-3 is a root stage
-  Stage-4 is a root stage
-  Stage-5 is a root stage
   Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
@@ -40,31 +38,32 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(key)
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   tag: -1
                   value expressions: _col1 (type: bigint)
                   auto parallelism: false
       Path -> Alias:
-        nullscan://null/default.src/part_ [null-subquery1-subquery1-subquery1:$hdt$_2-subquery1-subquery1-subquery1:src]
+#### A masked pattern was here ####
       Path -> Partition:
-        nullscan://null/default.src/part_ 
+#### A masked pattern was here ####
           Partition
-            input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
               COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
@@ -80,10 +79,10 @@ STAGE PLANS:
               rawDataSize 5312
               serialization.ddl struct src { string key, string value}
               serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               totalSize 5812
 #### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.NullStructSerDe
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -108,7 +107,7 @@ STAGE PLANS:
               name: default.src
             name: default.src
       Truncated Path -> Alias:
-        nullscan://null/default.src/part_ [null-subquery1-subquery1-subquery1:$hdt$_2-subquery1-subquery1-subquery1:src]
+        /src [null-subquery1:$hdt$_0-subquery1:src]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -116,11 +115,11 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: bigint), 1 (type: int)
+            expressions: _col0 (type: string), _col1 (type: bigint), 3 (type: int)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
@@ -146,65 +145,13 @@ STAGE PLANS:
           TableScan
             GatherStats: false
             Union
-              Statistics: Num rows: 502 Data size: 5332 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 502 Data size: 5332 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col0,_col1,_col2
-                      columns.types string:bigint:int
-                      escape.delim \
-                      hive.serialization.extend.additional.nesting.levels true
-                      serialization.escape.crlf true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-          TableScan
-            GatherStats: false
-            Union
-              Statistics: Num rows: 502 Data size: 5332 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 502 Data size: 5332 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col0,_col1,_col2
-                      columns.types string:bigint:int
-                      escape.delim \
-                      hive.serialization.extend.additional.nesting.levels true
-                      serialization.escape.crlf true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-          TableScan
-            GatherStats: false
-            Union
-              Statistics: Num rows: 502 Data size: 5332 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
                 GlobalTableId: 0
 #### A masked pattern was here ####
                 NumFilesPerFileSink: 1
-                Statistics: Num rows: 502 Data size: 5332 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -224,13 +171,13 @@ STAGE PLANS:
           TableScan
             GatherStats: false
             Union
-              Statistics: Num rows: 502 Data size: 5332 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
                 GlobalTableId: 0
 #### A masked pattern was here ####
                 NumFilesPerFileSink: 1
-                Statistics: Num rows: 502 Data size: 5332 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -294,50 +241,6 @@ STAGE PLANS:
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-#### A masked pattern was here ####
-          Partition
-            base file name: -mr-10006
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-            properties:
-              column.name.delimiter ,
-              columns _col0,_col1,_col2
-              columns.types string,bigint,int
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-              properties:
-                column.name.delimiter ,
-                columns _col0,_col1,_col2
-                columns.types string,bigint,int
-                escape.delim \
-                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-#### A masked pattern was here ####
-          Partition
-            base file name: -mr-10007
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-            properties:
-              column.name.delimiter ,
-              columns _col0,_col1,_col2
-              columns.types string,bigint,int
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-              properties:
-                column.name.delimiter ,
-                columns _col0,_col1,_col2
-                columns.types string,bigint,int
-                escape.delim \
-                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
       Truncated Path -> Alias:
 #### A masked pattern was here ####
 
@@ -348,221 +251,6 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count(key)
-                keys: key (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  null sort order: a
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col1 (type: bigint)
-                  auto parallelism: false
-      Path -> Alias:
-        nullscan://null/default.src/part_ [null-subquery1-subquery1-subquery2:$hdt$_2-subquery1-subquery1-subquery2:src]
-      Path -> Partition:
-        nullscan://null/default.src/part_ 
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-              bucket_count -1
-              column.name.delimiter ,
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.NullStructSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                bucket_count -1
-                column.name.delimiter ,
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        nullscan://null/default.src/part_ [null-subquery1-subquery1-subquery2:$hdt$_2-subquery1-subquery1-subquery2:src]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: bigint), 2 (type: int)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  properties:
-                    column.name.delimiter ,
-                    columns _col0,_col1,_col2
-                    columns.types string,bigint,int
-                    escape.delim \
-                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              TotalFiles: 1
-              GatherStats: false
-              MultiFileSpray: false
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: key
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count(key)
-                keys: key (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  null sort order: a
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col1 (type: bigint)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-              bucket_count -1
-              column.name.delimiter ,
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
-                bucket_count -1
-                column.name.delimiter ,
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [null-subquery1-subquery2:$hdt$_2-subquery1-subquery2:src]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: bigint), 3 (type: int)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  properties:
-                    column.name.delimiter ,
-                    columns _col0,_col1,_col2
-                    columns.types string,bigint,int
-                    escape.delim \
-                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              TotalFiles: 1
-              GatherStats: false
-              MultiFileSpray: false
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
             Select Operator
               expressions: key (type: string)
               outputColumnNames: key
@@ -632,7 +320,7 @@ STAGE PLANS:
               name: default.src
             name: default.src
       Truncated Path -> Alias:
-        /src [null-subquery2:$hdt$_2-subquery2:src]
+        /src [null-subquery2:$hdt$_0-subquery2:src]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
index 2dfcc18..c1bb62e 100644
--- a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
@@ -30,18 +30,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -51,18 +51,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -224,13 +224,13 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -324,18 +324,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -345,18 +345,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -518,17 +518,17 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -622,18 +622,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -643,18 +643,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -816,13 +816,13 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -916,18 +916,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -937,18 +937,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -1110,17 +1110,17 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/groupby_position.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_position.q.out b/ql/src/test/results/clientpositive/groupby_position.q.out
index 0a6c4a4..2ec9ecd 100644
--- a/ql/src/test/results/clientpositive/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/groupby_position.q.out
@@ -545,24 +545,24 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -578,14 +578,14 @@ STAGE PLANS:
               key expressions: _col0 (type: string)
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: string)
           TableScan
             Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -595,7 +595,7 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 3 Data size: 34 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -610,15 +610,15 @@ STAGE PLANS:
             Reduce Output Operator
               key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
               sort order: --++
-              Statistics: Num rows: 3 Data size: 34 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 3 Data size: 34 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 3 Data size: 34 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -631,24 +631,24 @@ STAGE PLANS:
             alias: src2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
index 3444efc..594fa26 100644
--- a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
@@ -14,33 +14,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-              Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
+              Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -49,10 +49,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -212,9 +212,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__src_src_index_bitmap__
-            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
             Filter Operator
-              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
                 outputColumnNames: _bucketname, _offset
@@ -252,37 +252,37 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a
-            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
-            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-              Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
+              Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -291,10 +291,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -305,9 +305,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__srcpart_srcpart_index_bitmap__
-            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+            filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
             Filter Operator
-              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+              predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
                 outputColumnNames: _bucketname, _offset

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
index 86cf47a..685f612 100644
--- a/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
@@ -14,33 +14,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-              Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
+              Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -49,10 +49,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -217,9 +217,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__src_src_index_compact__
-            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
             Filter Operator
-              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offsets (type: array<bigint>)
                 outputColumnNames: _col0, _col1
@@ -250,37 +250,37 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a
-            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
-            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-              Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
+              Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -289,10 +289,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 244 Data size: 2593 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -331,9 +331,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__srcpart_srcpart_index_compact__
-            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
             Filter Operator
-              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0)) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offsets (type: array<bigint>)
                 outputColumnNames: _col0, _col1


http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
index 2a10adb..55fdf26 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_scalar.q.out
@@ -1366,26 +1366,24 @@ STAGE PLANS:
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
-                     Left Outer Join 0 to 1
+                     Inner Join 0 to 1
                 keys:
                   0 _col0 (type: string)
                   1 _col2 (type: string)
                 outputColumnNames: _col1, _col2, _col3
-                Statistics: Num rows: 26 Data size: 216 Basic stats: COMPLETE Column stats: COMPLETE
-                Filter Operator
-                  predicate: ((_col1 + 100) < CASE WHEN (_col3 is null) THEN (null) ELSE (_col2) END) (type: boolean)
-                  Statistics: Num rows: 8 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 8 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
+                residual filter predicates: {((_col1 + 100) < CASE WHEN (_col3 is null) THEN (null) ELSE (_col2) END)}
+                Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                  Group By Operator
+                    aggregations: count()
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      sort order: 
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
+                      value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1497,26 +1495,20 @@ STAGE PLANS:
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
-                     Left Outer Join 0 to 1
+                     Inner Join 0 to 1
                 keys:
                   0 _col0 (type: string)
-                  1 _col2 (type: string)
-                outputColumnNames: _col1, _col2
-                Statistics: Num rows: 26 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
-                Filter Operator
-                  predicate: (100 < CASE WHEN (_col2 is null) THEN (null) ELSE (_col1) END) (type: boolean)
-                  Statistics: Num rows: 8 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    Statistics: Num rows: 8 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions: _col0 (type: bigint)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col0 (type: bigint)
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1541,16 +1533,18 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 1625 Basic stats: COMPLETE Column stats: COMPLETE
-                Select Operator
-                  expressions: _col1 (type: int), true (type: boolean), _col0 (type: string)
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: _col2 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col2 (type: string)
-                    Statistics: Num rows: 13 Data size: 1677 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: int), _col1 (type: boolean)
+                Filter Operator
+                  predicate: (100 < _col1) (type: boolean)
+                  Statistics: Num rows: 4 Data size: 500 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: _col0 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: COMPLETE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: COMPLETE
 
   Stage: Stage-0
     Fetch Operator
@@ -1628,26 +1622,24 @@ STAGE PLANS:
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
-                     Left Outer Join 0 to 1
+                     Inner Join 0 to 1
                 keys:
                   0 _col4 (type: string)
                   1 _col2 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                Statistics: Num rows: 28 Data size: 17703 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (UDFToDouble(_col5) > CASE WHEN (_col10 is null) THEN (null) ELSE (_col9) END) (type: boolean)
+                residual filter predicates: {(UDFToDouble(_col5) > CASE WHEN (_col10 is null) THEN (null) ELSE (_col9) END)}
+                Statistics: Num rows: 9 Data size: 5690 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                   Statistics: Num rows: 9 Data size: 5690 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 9 Data size: 5690 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 9 Data size: 5690 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 4 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1879,7 +1871,7 @@ POSTHOOK: Input: default@part_null
 85768	almond antique chartreuse lavender yellow	Manufacturer#1	Brand#12	LARGE BRUSHED STEEL	34	SM BAG	1753.76	refull
 86428	almond aquamarine burnished black steel	Manufacturer#1	Brand#12	STANDARD ANODIZED STEEL	28	WRAP BAG	1414.42	arefully 
 90681	almond antique chartreuse khaki white	Manufacturer#3	Brand#31	MEDIUM BURNISHED TIN	17	SM CASE	1671.68	are slyly after the sl
-Warning: Shuffle Join MERGEJOIN[28][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 3' is a cross product
+Warning: Shuffle Join MERGEJOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: explain select * from part where p_size >= (select min(p_size) from part_null where part_null.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from part where p_size >= (select min(p_size) from part_null where part_null.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null)
@@ -1963,23 +1955,21 @@ STAGE PLANS:
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
-                     Left Outer Join 0 to 1
+                     Inner Join 0 to 1
                 keys:
                   0 _col4 (type: string)
                   1 _col2 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
-                Statistics: Num rows: 28 Data size: 17703 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (_col5 >= CASE WHEN (_col10 is null) THEN (null) ELSE (_col9) END) (type: boolean)
+                residual filter predicates: {(_col5 >= CASE WHEN (_col10 is null) THEN (null) ELSE (_col9) END)}
+                Statistics: Num rows: 9 Data size: 5690 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                   Statistics: Num rows: 9 Data size: 5690 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+                  Reduce Output Operator
+                    sort order: 
                     Statistics: Num rows: 9 Data size: 5690 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 9 Data size: 5690 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
+                    value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -1989,8 +1979,8 @@ STAGE PLANS:
                 keys:
                   0 
                   1 
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12
-                residual filter predicates: {(_col7 <= _col12)}
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+                residual filter predicates: {(_col7 <= _col9)}
                 Statistics: Num rows: 3 Data size: 1923 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string)
@@ -2041,7 +2031,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[28][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 3' is a cross product
+Warning: Shuffle Join MERGEJOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: select * from part where p_size >= (select min(p_size) from part_null where part_null.p_type = part.p_type) AND p_retailprice <= (select max(p_retailprice) from part_null)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part
@@ -3916,26 +3906,24 @@ STAGE PLANS:
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
-                     Left Outer Join 0 to 1
+                     Inner Join 0 to 1
                 keys:
                   0 _col4 (type: int)
                   1 _col2 (type: int)
                 outputColumnNames: _col0, _col1, _col3, _col5, _col6
+                residual filter predicates: {(_col1 <> CASE WHEN (_col6 is null) THEN (null) ELSE (_col5) END)}
                 Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                Filter Operator
-                  predicate: (_col1 <> CASE WHEN (_col6 is null) THEN (null) ELSE (_col5) END) (type: boolean)
-                  Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: _col0 (type: int), _col3 (type: int)
-                    outputColumnNames: _col0, _col1
+                Select Operator
+                  expressions: _col0 (type: int), _col3 (type: int)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 7 
             Execution mode: llap
             Reduce Operator Tree:
@@ -4127,26 +4115,24 @@ STAGE PLANS:
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
-                     Left Outer Join 0 to 1
+                     Inner Join 0 to 1
                 keys:
                   0 _col4 (type: int)
                   1 _col2 (type: int)
                 outputColumnNames: _col0, _col1, _col3, _col5, _col6
+                residual filter predicates: {(_col1 <> CASE WHEN (_col6 is null) THEN (null) ELSE (_col5) END)}
                 Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                Filter Operator
-                  predicate: (_col1 <> CASE WHEN (_col6 is null) THEN (null) ELSE (_col5) END) (type: boolean)
-                  Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: _col0 (type: int), _col3 (type: int)
-                    outputColumnNames: _col0, _col1
+                Select Operator
+                  expressions: _col0 (type: int), _col3 (type: int)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 7 
             Execution mode: llap
             Reduce Operator Tree:
@@ -4857,26 +4843,24 @@ STAGE PLANS:
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
-                     Left Outer Join 0 to 1
+                     Inner Join 0 to 1
                 keys:
                   0 _col2 (type: int)
                   1 _col2 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
-                Statistics: Num rows: 5 Data size: 261 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (_col1 > CASE WHEN (_col11 is null) THEN (null) ELSE (_col10) END) (type: boolean)
+                residual filter predicates: {(_col1 > CASE WHEN (_col11 is null) THEN (null) ELSE (_col10) END)}
+                Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: date)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                   Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: date)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 1 Data size: 52 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 4 
             Execution mode: llap
             Reduce Operator Tree:
@@ -5096,7 +5080,7 @@ POSTHOOK: Input: default@emps
 110	John	40	M	Vancouver	2	NULL	false	true	2002-05-03
 120	Wilma	20	F	NULL	1	5	NULL	true	2005-09-07
 130	Alice	40	F	Vancouver	2	NULL	false	true	2007-01-01
-Warning: Shuffle Join MERGEJOIN[28][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 3' is a cross product
+Warning: Shuffle Join MERGEJOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: explain select * from emps where deptno <> (select sum(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from emps where deptno <> (select sum(deptno) from depts where depts.name = emps.name) and empno > (select count(name) from depts)
@@ -5180,23 +5164,21 @@ STAGE PLANS:
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
-                     Left Outer Join 0 to 1
+                     Inner Join 0 to 1
                 keys:
                   0 _col1 (type: string)
                   1 _col2 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11
+                residual filter predicates: {(UDFToLong(_col2) <> CASE WHEN (_col11 is null) THEN (null) ELSE (_col10) END)}
                 Statistics: Num rows: 5 Data size: 261 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (UDFToLong(_col2) <> CASE WHEN (_col11 is null) THEN (null) ELSE (_col10) END) (type: boolean)
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: date)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                   Statistics: Num rows: 5 Data size: 261 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: date)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
+                  Reduce Output Operator
+                    sort order: 
                     Statistics: Num rows: 5 Data size: 261 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 5 Data size: 261 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: date)
+                    value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: date)
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -5206,8 +5188,8 @@ STAGE PLANS:
                 keys:
                   0 
                   1 
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col13
-                residual filter predicates: {(UDFToLong(_col0) > _col13)}
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
+                residual filter predicates: {(UDFToLong(_col0) > _col10)}
                 Statistics: Num rows: 1 Data size: 61 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: boolean), _col8 (type: boolean), _col9 (type: date)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
index def04a8..4b1e92d 100644
--- a/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_auto_smb_mapjoin_14.q.out
@@ -841,30 +841,30 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                       Merge Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           Group By Vectorization:
@@ -1010,30 +1010,30 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                       Merge Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           Group By Vectorization:
@@ -1802,30 +1802,30 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key < 8) and (key < 6)) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key < 6) (type: boolean)
+                    Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: NONE
                       Merge Join Operator
                         condition map:
                              Inner Join 0 to 1
                         keys:
                           0 _col0 (type: int)
                           1 _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           Group By Vectorization:

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_date_1.q.out b/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
index e8f32af..d80da7e 100644
--- a/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/vector_date_1.q.out
@@ -639,7 +639,7 @@ STAGE PLANS:
                   alias: vector_date_1
                   Statistics: Num rows: 3 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((dt1 = 2001-01-01) and (2001-01-01 = dt1) and (dt1 <> 1970-01-01) and (1970-01-01 <> dt1) and (dt1 > 1970-01-01) and (dt1 >= 1970-01-01) and (1970-01-01 < dt1) and (1970-01-01 <= dt1)) (type: boolean)
+                    predicate: ((2001-01-01 = dt1) and (dt1 <> 1970-01-01) and (1970-01-01 <> dt1)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: 2001-01-01 (type: date), dt2 (type: date)

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/louter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/louter_join_ppr.q.out
index a4de667..61fd8a3 100644
--- a/ql/src/test/results/clientpositive/louter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/louter_join_ppr.q.out
@@ -30,18 +30,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -51,18 +51,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -224,13 +224,13 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -324,18 +324,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -345,18 +345,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -518,17 +518,17 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -622,18 +622,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -643,18 +643,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -816,13 +816,13 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -916,18 +916,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -937,18 +937,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -1110,17 +1110,17 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/masking_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_4.q.out b/ql/src/test/results/clientpositive/masking_4.q.out
index 36889e6..527da21 100644
--- a/ql/src/test/results/clientpositive/masking_4.q.out
+++ b/ql/src/test/results/clientpositive/masking_4.q.out
@@ -84,15 +84,15 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key % 2) = 0) and (key < 10) and (key = 5)) (type: boolean)
-              Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+              predicate: (((key % 2) = 0) and (key = 5)) (type: boolean)
+              Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 5 (type: int), reverse(value) (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -124,15 +124,15 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key % 2) = 0) and (key < 10) and (key = 5)) (type: boolean)
-              Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+              predicate: (((key % 2) = 0) and (key = 5)) (type: boolean)
+              Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 5 (type: int), reverse(value) (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/materialized_view_create.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/materialized_view_create.q.out b/ql/src/test/results/clientpositive/materialized_view_create.q.out
index fc88dbc..0fca69f 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create.q.out
@@ -164,3 +164,43 @@ key	value
 numFiles	1
 totalSize	710
 #### A masked pattern was here ####
+PREHOOK: query: drop materialized view cmv_mat_view
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view
+PREHOOK: Output: default@cmv_mat_view
+POSTHOOK: query: drop materialized view cmv_mat_view
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view
+POSTHOOK: Output: default@cmv_mat_view
+PREHOOK: query: drop materialized view cmv_mat_view2
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view2
+PREHOOK: Output: default@cmv_mat_view2
+POSTHOOK: query: drop materialized view cmv_mat_view2
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view2
+POSTHOOK: Output: default@cmv_mat_view2
+PREHOOK: query: drop materialized view cmv_mat_view3
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view3
+PREHOOK: Output: default@cmv_mat_view3
+POSTHOOK: query: drop materialized view cmv_mat_view3
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view3
+POSTHOOK: Output: default@cmv_mat_view3
+PREHOOK: query: drop materialized view cmv_mat_view4
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view4
+PREHOOK: Output: default@cmv_mat_view4
+POSTHOOK: query: drop materialized view cmv_mat_view4
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view4
+POSTHOOK: Output: default@cmv_mat_view4
+PREHOOK: query: drop materialized view cmv_mat_view5
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view5
+PREHOOK: Output: default@cmv_mat_view5
+POSTHOOK: query: drop materialized view cmv_mat_view5
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view5
+POSTHOOK: Output: default@cmv_mat_view5

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out b/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out
index 041621f..35e9a5d 100644
--- a/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out
+++ b/ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out
@@ -320,3 +320,11 @@ POSTHOOK: Input: default@cmv_basetable
 #### A masked pattern was here ####
 3	9.80	3	978.76
 3	978.76	3	978.76
+PREHOOK: query: drop materialized view cmv_mat_view
+PREHOOK: type: DROP_MATERIALIZED_VIEW
+PREHOOK: Input: default@cmv_mat_view
+PREHOOK: Output: default@cmv_mat_view
+POSTHOOK: query: drop materialized view cmv_mat_view
+POSTHOOK: type: DROP_MATERIALIZED_VIEW
+POSTHOOK: Input: default@cmv_mat_view
+POSTHOOK: Output: default@cmv_mat_view

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
index 09f30b8..abfc8e6 100644
--- a/ql/src/test/results/clientpositive/outer_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
@@ -30,18 +30,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -51,18 +51,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -224,13 +224,13 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -324,18 +324,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -345,18 +345,18 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   null sort order: a
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col1 (type: string)
                   auto parallelism: false
@@ -518,13 +518,13 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/perf/query1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query1.q.out b/ql/src/test/results/clientpositive/perf/query1.q.out
index 9c5d440..46a3812 100644
--- a/ql/src/test/results/clientpositive/perf/query1.q.out
+++ b/ql/src/test/results/clientpositive/perf/query1.q.out
@@ -63,44 +63,44 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7
-      File Output Operator [FS_54]
-        Limit [LIM_53] (rows=100 width=860)
+      File Output Operator [FS_53]
+        Limit [LIM_52] (rows=100 width=860)
           Number of rows:100
-          Select Operator [SEL_52] (rows=32266667 width=860)
+          Select Operator [SEL_51] (rows=32266667 width=860)
             Output:["_col0"]
           <-Reducer 6 [SIMPLE_EDGE]
-            SHUFFLE [RS_51]
-              Select Operator [SEL_50] (rows=32266667 width=860)
+            SHUFFLE [RS_50]
+              Select Operator [SEL_49] (rows=32266667 width=860)
                 Output:["_col0"]
-                Filter Operator [FIL_49] (rows=32266667 width=860)
+                Filter Operator [FIL_48] (rows=32266667 width=860)
                   predicate:(_col2 > CASE WHEN (_col8 is null) THEN (null) ELSE (_col7) END)
-                  Merge Join Operator [MERGEJOIN_78] (rows=96800003 width=860)
-                    Conds:RS_45._col1=RS_46._col2(Left Outer),Output:["_col2","_col6","_col7","_col8"]
+                  Merge Join Operator [MERGEJOIN_81] (rows=96800003 width=860)
+                    Conds:RS_45._col1=RS_46._col2(Inner),Output:["_col2","_col6","_col7","_col8"]
                   <-Reducer 5 [SIMPLE_EDGE]
                     SHUFFLE [RS_45]
                       PartitionCols:_col1
-                      Merge Join Operator [MERGEJOIN_76] (rows=88000001 width=860)
+                      Merge Join Operator [MERGEJOIN_79] (rows=88000001 width=860)
                         Conds:RS_42._col0=RS_43._col0(Inner),Output:["_col1","_col2","_col6"]
                       <-Map 12 [SIMPLE_EDGE]
                         SHUFFLE [RS_43]
                           PartitionCols:_col0
                           Select Operator [SEL_19] (rows=80000000 width=860)
                             Output:["_col0","_col1"]
-                            Filter Operator [FIL_71] (rows=80000000 width=860)
+                            Filter Operator [FIL_74] (rows=80000000 width=860)
                               predicate:c_customer_sk is not null
                               TableScan [TS_17] (rows=80000000 width=860)
                                 default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_customer_id"]
                       <-Reducer 4 [SIMPLE_EDGE]
                         SHUFFLE [RS_42]
                           PartitionCols:_col0
-                          Merge Join Operator [MERGEJOIN_75] (rows=34842647 width=77)
+                          Merge Join Operator [MERGEJOIN_78] (rows=34842647 width=77)
                             Conds:RS_39._col1=RS_40._col0(Inner),Output:["_col0","_col1","_col2"]
                           <-Map 11 [SIMPLE_EDGE]
                             SHUFFLE [RS_40]
                               PartitionCols:_col0
                               Select Operator [SEL_16] (rows=852 width=1910)
                                 Output:["_col0"]
-                                Filter Operator [FIL_70] (rows=852 width=1910)
+                                Filter Operator [FIL_73] (rows=852 width=1910)
                                   predicate:((s_state = 'NM') and s_store_sk is not null)
                                   TableScan [TS_14] (rows=1704 width=1910)
                                     default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_state"]
@@ -116,14 +116,14 @@ Stage-0
                                     PartitionCols:_col0, _col1
                                     Group By Operator [GBY_10] (rows=63350266 width=77)
                                       Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col2, _col1
-                                      Merge Join Operator [MERGEJOIN_74] (rows=63350266 width=77)
+                                      Merge Join Operator [MERGEJOIN_77] (rows=63350266 width=77)
                                         Conds:RS_6._col0=RS_7._col0(Inner),Output:["_col1","_col2","_col3"]
                                       <-Map 1 [SIMPLE_EDGE]
                                         SHUFFLE [RS_6]
                                           PartitionCols:_col0
                                           Select Operator [SEL_2] (rows=57591150 width=77)
                                             Output:["_col0","_col1","_col2","_col3"]
-                                            Filter Operator [FIL_68] (rows=57591150 width=77)
+                                            Filter Operator [FIL_71] (rows=57591150 width=77)
                                               predicate:(sr_returned_date_sk is not null and sr_store_sk is not null and sr_customer_sk is not null)
                                               TableScan [TS_0] (rows=57591150 width=77)
                                                 default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_customer_sk","sr_store_sk","sr_fee"]
@@ -132,7 +132,7 @@ Stage-0
                                           PartitionCols:_col0
                                           Select Operator [SEL_5] (rows=36524 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_69] (rows=36524 width=1119)
+                                            Filter Operator [FIL_72] (rows=36524 width=1119)
                                               predicate:((d_year = 2000) and d_date_sk is not null)
                                               TableScan [TS_3] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
@@ -152,14 +152,14 @@ Stage-0
                                 PartitionCols:_col0
                                 Group By Operator [GBY_30] (rows=63350266 width=77)
                                   Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col2, _col1
-                                  Merge Join Operator [MERGEJOIN_77] (rows=63350266 width=77)
+                                  Merge Join Operator [MERGEJOIN_80] (rows=63350266 width=77)
                                     Conds:RS_26._col0=RS_27._col0(Inner),Output:["_col1","_col2","_col3"]
                                   <-Map 1 [SIMPLE_EDGE]
                                     SHUFFLE [RS_26]
                                       PartitionCols:_col0
                                       Select Operator [SEL_22] (rows=57591150 width=77)
                                         Output:["_col0","_col1","_col2","_col3"]
-                                        Filter Operator [FIL_72] (rows=57591150 width=77)
+                                        Filter Operator [FIL_75] (rows=57591150 width=77)
                                           predicate:(sr_returned_date_sk is not null and sr_store_sk is not null)
                                            Please refer to the previous TableScan [TS_0]
                                   <-Map 10 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/perf/query30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query30.q.out b/ql/src/test/results/clientpositive/perf/query30.q.out
index 29f2061..6f5d8ba 100644
--- a/ql/src/test/results/clientpositive/perf/query30.q.out
+++ b/ql/src/test/results/clientpositive/perf/query30.q.out
@@ -77,49 +77,49 @@ Stage-0
     limit:100
     Stage-1
       Reducer 4
-      File Output Operator [FS_67]
-        Limit [LIM_66] (rows=100 width=860)
+      File Output Operator [FS_66]
+        Limit [LIM_65] (rows=100 width=860)
           Number of rows:100
-          Select Operator [SEL_65] (rows=32266667 width=860)
+          Select Operator [SEL_64] (rows=96800003 width=860)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
           <-Reducer 3 [SIMPLE_EDGE]
-            SHUFFLE [RS_64]
-              Select Operator [SEL_63] (rows=32266667 width=860)
+            SHUFFLE [RS_63]
+              Select Operator [SEL_62] (rows=96800003 width=860)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
-                Filter Operator [FIL_62] (rows=32266667 width=860)
-                  predicate:(_col2 > CASE WHEN (_col20 is null) THEN (null) ELSE (_col19) END)
-                  Select Operator [SEL_61] (rows=96800003 width=860)
-                    Output:["_col2","_col6","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20"]
-                    Merge Join Operator [MERGEJOIN_105] (rows=96800003 width=860)
-                      Conds:RS_58._col0=RS_59._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col18","_col19","_col20"]
-                    <-Reducer 2 [SIMPLE_EDGE]
-                      SHUFFLE [RS_58]
+                Merge Join Operator [MERGEJOIN_107] (rows=96800003 width=860)
+                  Conds:RS_59._col0=RS_60._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col18"]
+                <-Reducer 2 [SIMPLE_EDGE]
+                  SHUFFLE [RS_59]
+                    PartitionCols:_col0
+                    Merge Join Operator [MERGEJOIN_101] (rows=88000001 width=860)
+                      Conds:RS_56._col2=RS_57._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
+                    <-Map 5 [SIMPLE_EDGE]
+                      SHUFFLE [RS_57]
                         PartitionCols:_col0
-                        Merge Join Operator [MERGEJOIN_99] (rows=88000001 width=860)
-                          Conds:RS_55._col2=RS_56._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
-                        <-Map 5 [SIMPLE_EDGE]
-                          SHUFFLE [RS_56]
-                            PartitionCols:_col0
-                            Select Operator [SEL_5] (rows=20000000 width=1014)
-                              Output:["_col0"]
-                              Filter Operator [FIL_92] (rows=20000000 width=1014)
-                                predicate:((ca_state = 'IL') and ca_address_sk is not null)
-                                TableScan [TS_3] (rows=40000000 width=1014)
-                                  default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
-                        <-Map 1 [SIMPLE_EDGE]
-                          SHUFFLE [RS_55]
-                            PartitionCols:_col2
-                            Select Operator [SEL_2] (rows=80000000 width=860)
-                              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
-                              Filter Operator [FIL_91] (rows=80000000 width=860)
-                                predicate:(c_customer_sk is not null and c_current_addr_sk is not null)
-                                TableScan [TS_0] (rows=80000000 width=860)
-                                  default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_customer_id","c_current_addr_sk","c_salutation","c_first_name","c_last_name","c_preferred_cust_flag","c_birth_day","c_birth_month","c_birth_year","c_birth_country","c_login","c_email_address","c_last_review_date"]
-                    <-Reducer 8 [SIMPLE_EDGE]
-                      SHUFFLE [RS_59]
-                        PartitionCols:_col0
-                        Merge Join Operator [MERGEJOIN_104] (rows=24200000 width=1014)
-                          Conds:RS_51._col1=RS_52._col2(Left Outer),Output:["_col0","_col2","_col3","_col4"]
+                        Select Operator [SEL_5] (rows=20000000 width=1014)
+                          Output:["_col0"]
+                          Filter Operator [FIL_94] (rows=20000000 width=1014)
+                            predicate:((ca_state = 'IL') and ca_address_sk is not null)
+                            TableScan [TS_3] (rows=40000000 width=1014)
+                              default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
+                    <-Map 1 [SIMPLE_EDGE]
+                      SHUFFLE [RS_56]
+                        PartitionCols:_col2
+                        Select Operator [SEL_2] (rows=80000000 width=860)
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
+                          Filter Operator [FIL_93] (rows=80000000 width=860)
+                            predicate:(c_customer_sk is not null and c_current_addr_sk is not null)
+                            TableScan [TS_0] (rows=80000000 width=860)
+                              default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_customer_id","c_current_addr_sk","c_salutation","c_first_name","c_last_name","c_preferred_cust_flag","c_birth_day","c_birth_month","c_birth_year","c_birth_country","c_login","c_email_address","c_last_review_date"]
+                <-Reducer 8 [SIMPLE_EDGE]
+                  SHUFFLE [RS_60]
+                    PartitionCols:_col0
+                    Select Operator [SEL_55] (rows=8066666 width=1014)
+                      Output:["_col0","_col2"]
+                      Filter Operator [FIL_54] (rows=8066666 width=1014)
+                        predicate:(_col2 > CASE WHEN (_col4 is null) THEN (null) ELSE (_col3) END)
+                        Merge Join Operator [MERGEJOIN_106] (rows=24200000 width=1014)
+                          Conds:RS_51._col1=RS_52._col2(Inner),Output:["_col0","_col2","_col3","_col4"]
                         <-Reducer 10 [SIMPLE_EDGE]
                           SHUFFLE [RS_52]
                             PartitionCols:_col2
@@ -136,27 +136,27 @@ Stage-0
                                       PartitionCols:_col0
                                       Group By Operator [GBY_42] (rows=44000000 width=1014)
                                         Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col7, _col1
-                                        Merge Join Operator [MERGEJOIN_103] (rows=44000000 width=1014)
+                                        Merge Join Operator [MERGEJOIN_105] (rows=44000000 width=1014)
                                           Conds:RS_38._col2=RS_39._col0(Inner),Output:["_col1","_col3","_col7"]
                                         <-Map 5 [SIMPLE_EDGE]
                                           SHUFFLE [RS_39]
                                             PartitionCols:_col0
                                             Select Operator [SEL_34] (rows=40000000 width=1014)
                                               Output:["_col0","_col1"]
-                                              Filter Operator [FIL_98] (rows=40000000 width=1014)
+                                              Filter Operator [FIL_100] (rows=40000000 width=1014)
                                                 predicate:(ca_address_sk is not null and ca_state is not null)
                                                  Please refer to the previous TableScan [TS_3]
                                         <-Reducer 13 [SIMPLE_EDGE]
                                           SHUFFLE [RS_38]
                                             PartitionCols:_col2
-                                            Merge Join Operator [MERGEJOIN_102] (rows=15838314 width=92)
+                                            Merge Join Operator [MERGEJOIN_104] (rows=15838314 width=92)
                                               Conds:RS_35._col0=RS_36._col0(Inner),Output:["_col1","_col2","_col3"]
                                             <-Map 11 [SIMPLE_EDGE]
                                               SHUFFLE [RS_35]
                                                 PartitionCols:_col0
                                                 Select Operator [SEL_28] (rows=14398467 width=92)
                                                   Output:["_col0","_col1","_col2","_col3"]
-                                                  Filter Operator [FIL_96] (rows=14398467 width=92)
+                                                  Filter Operator [FIL_98] (rows=14398467 width=92)
                                                     predicate:(wr_returned_date_sk is not null and wr_returning_addr_sk is not null)
                                                     TableScan [TS_6] (rows=14398467 width=92)
                                                       default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_returned_date_sk","wr_returning_customer_sk","wr_returning_addr_sk","wr_return_amt"]
@@ -165,7 +165,7 @@ Stage-0
                                                 PartitionCols:_col0
                                                 Select Operator [SEL_11] (rows=36524 width=1119)
                                                   Output:["_col0"]
-                                                  Filter Operator [FIL_94] (rows=36524 width=1119)
+                                                  Filter Operator [FIL_96] (rows=36524 width=1119)
                                                     predicate:((d_year = 2002) and d_date_sk is not null)
                                                     TableScan [TS_9] (rows=73049 width=1119)
                                                       default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
@@ -181,27 +181,27 @@ Stage-0
                                   PartitionCols:_col0, _col1
                                   Group By Operator [GBY_22] (rows=44000000 width=1014)
                                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col7, _col1
-                                    Merge Join Operator [MERGEJOIN_101] (rows=44000000 width=1014)
+                                    Merge Join Operator [MERGEJOIN_103] (rows=44000000 width=1014)
                                       Conds:RS_18._col2=RS_19._col0(Inner),Output:["_col1","_col3","_col7"]
                                     <-Map 5 [SIMPLE_EDGE]
                                       SHUFFLE [RS_19]
                                         PartitionCols:_col0
                                         Select Operator [SEL_14] (rows=40000000 width=1014)
                                           Output:["_col0","_col1"]
-                                          Filter Operator [FIL_95] (rows=40000000 width=1014)
+                                          Filter Operator [FIL_97] (rows=40000000 width=1014)
                                             predicate:ca_address_sk is not null
                                              Please refer to the previous TableScan [TS_3]
                                     <-Reducer 12 [SIMPLE_EDGE]
                                       SHUFFLE [RS_18]
                                         PartitionCols:_col2
-                                        Merge Join Operator [MERGEJOIN_100] (rows=15838314 width=92)
+                                        Merge Join Operator [MERGEJOIN_102] (rows=15838314 width=92)
                                           Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col1","_col2","_col3"]
                                         <-Map 11 [SIMPLE_EDGE]
                                           SHUFFLE [RS_15]
                                             PartitionCols:_col0
                                             Select Operator [SEL_8] (rows=14398467 width=92)
                                               Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_93] (rows=14398467 width=92)
+                                              Filter Operator [FIL_95] (rows=14398467 width=92)
                                                 predicate:(wr_returned_date_sk is not null and wr_returning_addr_sk is not null and wr_returning_customer_sk is not null)
                                                  Please refer to the previous TableScan [TS_6]
                                         <-Map 14 [SIMPLE_EDGE]


[02/10] hive git commit: HIVE-16888: Upgrade Calcite to 1.13 and Avatica to 1.10 (Remus Rusanu and Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jc...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
index 729da5e..4837ebf 100644
--- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez2.q.out
@@ -349,12 +349,12 @@ STAGE PLANS:
                   alias: tab_part
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 2) and (key > 1)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: int)
@@ -372,12 +372,12 @@ STAGE PLANS:
                   alias: tab_part
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 1) and (key > 2)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Map Join Operator
                         condition map:
                              Inner Join 0 to 1
@@ -387,10 +387,10 @@ STAGE PLANS:
                         outputColumnNames: _col0, _col1
                         input vertices:
                           1 Map 2
-                        Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
                           compressed: false
-                          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -426,12 +426,12 @@ STAGE PLANS:
                   alias: tab_part
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 2) and (key > 1)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: int)
@@ -503,12 +503,12 @@ STAGE PLANS:
                   alias: tab_part
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > 1) and (key > 2)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (key > 2) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 _col0 (type: int)
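
The bucket_map_join_tez2 hunks above show the main planner effect of the Calcite 1.13 upgrade in these files: once a comparison has been propagated transitively across the equi-join key, the now-redundant conjunct is simplified away, so "((key > 2) and (key > 1))" collapses to "(key > 2)". The row estimates move accordingly, consistent with Hive applying a default selectivity of roughly 1/3 per comparison conjunct when column statistics are absent: 500/9 ≈ 55 rows with two conjuncts, 500/3 ≈ 166 with one, and the map-join output estimate grows from 60 to 182. A hedged sketch of the query shape that produces this kind of plan (the literal bucket_map_join_tez2.q statements are not reproduced here):

  -- Assumes the bucketed q-test table tab_part(key int, value string);
  -- the actual test query may differ in its select list.
  EXPLAIN
  SELECT a.key, b.key
  FROM tab_part a
  JOIN tab_part b ON a.key = b.key
  WHERE a.key > 1 AND b.key > 2;
  -- Before the upgrade each scan filtered on ((key > 2) and (key > 1));
  -- afterwards only (key > 2) remains, since key > 2 implies key > 1.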

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/groupby_position.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_position.q.out b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
index 163c5ab..ae27cc1 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
@@ -548,61 +548,61 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string), substr(value, 5) (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         keys: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                           sort order: +++
                           Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: src2
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     keys: _col0 (type: string), _col1 (type: string)
                     mode: complete
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 13 Data size: 137 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 13 Data size: 137 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: string)
         Reducer 3 
             Reduce Operator Tree:
@@ -613,20 +613,20 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 3 Data size: 34 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
                   sort order: --++
-                  Statistics: Num rows: 3 Data size: 34 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 3 Data size: 34 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 3 Data size: 34 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 29 Data size: 314 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -637,12 +637,12 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
 
   Stage: Stage-0
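
In groupby_position the same simplification applies to a range: the old plan carried all four UDFToDouble(key) bounds (> 10, < 20, > 15, < 25) on each scan, while the new plan keeps only the two that define the intersection, (key > 15.0) and (key < 20.0). Four conjuncts estimated 500/81 ≈ 6 rows; two estimate 500/9 ≈ 55, and the downstream group-by and join estimates (3 → 27, 1 → 13, 3 → 29) are recomputed from that larger input. A simplified, hedged sketch of the query shape (not the literal groupby_position.q statement, which also exercises GROUP BY position aliases):

  -- Assumes the standard 500-row src(key string, value string) q-test table.
  EXPLAIN
  SELECT s1.key, s1.value, s2.key, s2.value
  FROM (SELECT key, value FROM src WHERE key > 10 AND key < 20) s1
  JOIN (SELECT key, value FROM src WHERE key > 15 AND key < 25) s2
    ON s1.key = s2.key;
  -- Transitive propagation over s1.key = s2.key gives both sides all four
  -- bounds; Calcite 1.13 keeps only the intersection 15 < key < 20.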

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/join12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join12.q.out b/ql/src/test/results/clientpositive/spark/join12.q.out
index 88bacf5..efc8fc3 100644
--- a/ql/src/test/results/clientpositive/spark/join12.q.out
+++ b/ql/src/test/results/clientpositive/spark/join12.q.out
@@ -37,51 +37,51 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) < 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) < 100.0) and (UDFToDouble(key) < 80.0)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (UDFToDouble(key) < 80.0) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -94,14 +94,14 @@ STAGE PLANS:
                   1 _col0 (type: string)
                   2 _col0 (type: string)
                 outputColumnNames: _col0, _col3
-                Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col3 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 365 Data size: 3878 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
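
join12 shows the same rewrite in a three-way self-join of src: "((key < 100.0) and (key < 80.0))" reduces to "(key < 80.0)" on every scan, because key < 80 subsumes key < 100 once the bounds have been propagated across the join keys. Each scan estimate moves from 55 to 166 rows and the three-way join output from 121 to 365. A hedged sketch of the shape involved (the literal join12.q select list and bounds may differ):

  EXPLAIN
  SELECT src1.key, src3.value
  FROM src src1
  JOIN src src2
    ON (src1.key = src2.key AND src1.key < 100 AND src2.key < 100)
  JOIN src src3
    ON (src1.key = src3.key AND src3.key < 80);
  -- All three scans end up with the single predicate UDFToDouble(key) < 80.0.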

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join16.q.out b/ql/src/test/results/clientpositive/spark/join16.q.out
index 15adae3..b3ea6d4 100644
--- a/ql/src/test/results/clientpositive/spark/join16.q.out
+++ b/ql/src/test/results/clientpositive/spark/join16.q.out
@@ -19,34 +19,34 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-                    Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                        Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: tab
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
-                    Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                        Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -56,14 +56,14 @@ STAGE PLANS:
                   0 _col0 (type: string), _col1 (type: string)
                   1 _col0 (type: string), _col1 (type: string)
                 outputColumnNames: _col0, _col3
-                Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col3 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 19 Data size: 210 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
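
join16 is a useful contrast: only genuinely redundant comparisons are dropped. "((key > 10.0) and (key > 20.0) and (value < 200.0))" becomes "((key > 20.0) and (value < 200.0))"; the value bound is untouched because nothing implies it, and it stays on both inputs since value is part of the join key. A hedged sketch of the shape (not the literal join16.q statement):

  EXPLAIN
  SELECT subq.key, tab.value
  FROM (SELECT key, value FROM src WHERE key > 10) subq
  JOIN src tab
    ON (subq.key = tab.key AND subq.value = tab.value AND subq.key > 20)
  WHERE tab.value < 200;
  -- key > 10 is implied by key > 20 and is removed; value < 200 propagates
  -- over the value equi-join column and is kept on both sides.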

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join4.q.out b/ql/src/test/results/clientpositive/spark/join4.q.out
index 8ccc907..ae91a49 100644
--- a/ql/src/test/results/clientpositive/spark/join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/join4.q.out
@@ -72,17 +72,17 @@ STAGE PLANS:
                   alias: src2
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join5.q.out b/ql/src/test/results/clientpositive/spark/join5.q.out
index 2ab020e..d79af77 100644
--- a/ql/src/test/results/clientpositive/spark/join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/join5.q.out
@@ -54,17 +54,17 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join8.q.out b/ql/src/test/results/clientpositive/spark/join8.q.out
index f8faaa7..4ddad76 100644
--- a/ql/src/test/results/clientpositive/spark/join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/join8.q.out
@@ -72,17 +72,17 @@ STAGE PLANS:
                   alias: src2
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
index ac056b7..9b8b69c 100644
--- a/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
@@ -857,10 +857,10 @@ STAGE PLANS:
                      Right Outer Join 0 to 1
                      Left Outer Join 1 to 2
                 filter mappings:
-                  1 [0, 2, 2, 2]
+                  1 [0, 1, 2, 1]
                 filter predicates:
                   0 
-                  1 {(VALUE._col0 = 50)} {(VALUE._col0 > 10)} {(VALUE._col0 = 60)} {(VALUE._col0 > 20)}
+                  1 {(VALUE._col0 = 50)} {(VALUE._col0 = 60)}
                   2 
                 keys:
                   0 _col0 (type: int)
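
The join_filters_overlap change concerns residual ON-clause filters of an outer join rather than pushed-down WHERE predicates. Input 1 previously carried four filter predicates, {(VALUE._col0 = 50)} {(VALUE._col0 > 10)} {(VALUE._col0 = 60)} {(VALUE._col0 > 20)}, with filter mappings [0, 2, 2, 2], i.e. two filters applied against input 0 and two against input 2. Since value = 50 implies value > 10 and value = 60 implies value > 20, the inequality copies are dropped, leaving one filter per side: {(VALUE._col0 = 50)} {(VALUE._col0 = 60)} with mappings [0, 1, 2, 1]. A hedged, hypothetical sketch of an overlapping-filter query of this kind (the actual join_filters_overlap.q statements are not reproduced here, and the table names a, b, c are placeholders):

  -- Three-way outer join whose ON clauses carry overlapping filters on the
  -- middle input's value column.
  EXPLAIN
  SELECT *
  FROM a
  RIGHT OUTER JOIN b ON (a.key = b.key AND b.value = 50 AND b.value > 10)
  LEFT OUTER JOIN c ON (b.key = c.key AND b.value = 60 AND b.value > 20);
  -- The b.value > 10 and b.value > 20 conditions are redundant and no longer
  -- appear among the join's residual filter predicates.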

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
index 5de7733..65755e7 100644
--- a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
@@ -35,18 +35,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -109,18 +109,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -236,13 +236,13 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 0
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -341,18 +341,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -466,18 +466,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -542,17 +542,17 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col3, _col4
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -651,18 +651,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -725,18 +725,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -852,13 +852,13 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 0
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -957,18 +957,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -1082,18 +1082,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -1158,17 +1158,17 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col3, _col4
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
index b2ab7d7..3fcfe2a 100644
--- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
@@ -35,18 +35,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -109,18 +109,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -236,13 +236,13 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 0
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -341,18 +341,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         tag: 0
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -415,18 +415,18 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         null sort order: a
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         tag: 1
                         value expressions: _col1 (type: string)
                         auto parallelism: false
@@ -542,13 +542,13 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 0
 #### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 13 Data size: 139 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out b/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
index d01674e..27c0873 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
@@ -36,34 +36,34 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
-                    Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '400') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
+                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '400') and (key <> '4') and (key > '20')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -73,25 +73,25 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: ((_col1 > '50') or (_col0 < '50')) (type: boolean)
-                  Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -100,10 +100,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 20 Data size: 214 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 20 Data size: 214 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -304,34 +304,34 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
-                    Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '400') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
+                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '400') and (key <> '4') and (key > '20')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -341,25 +341,25 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: ((_col1 > '50') or (_col0 < '50')) (type: boolean)
-                  Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -368,10 +368,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 20 Data size: 214 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 23 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 20 Data size: 214 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/ppd_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join.q.out b/ql/src/test/results/clientpositive/spark/ppd_join.q.out
index 85cd8c0..1b2487e 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join.q.out
@@ -33,34 +33,34 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
-                    Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '400') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
+                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '400') and (key <> '4') and (key > '20')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -71,17 +71,17 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: ((_col1 > '50') or (_col0 < '50')) (type: boolean)
-                  Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col2 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -566,34 +566,34 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
-                    Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '400') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
+                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((key < '400') and (key <> '4') and (key > '20')) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -604,17 +604,17 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: ((_col1 > '50') or (_col0 < '50')) (type: boolean)
-                  Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col2 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 4 Data size: 46 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 40 Data size: 428 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/4a567f86/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out
index 7f60d98..3035b43 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out
@@ -33,17 +33,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -51,17 +51,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -72,10 +72,10 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -148,17 +148,17 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
@@ -166,17 +166,17 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -187,10 +187,10 @@ STAGE PLANS:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 6 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat