Posted to commits@hive.apache.org by sp...@apache.org on 2016/05/26 15:40:23 UTC

[01/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan) [Forced Update!]

Repository: hive
Updated Branches:
  refs/heads/java8 4cbc10e08 -> bd651756a (forced update)
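
Context for the diffs below: per the commit title, HIVE-13068 stops re-running the ConstantPropagate optimizer once CBO has already optimized the plan, so a literal that CBO projected as an output column (the constant 1 in vector_mapjoin_reduce.q.out) stays referenced as _col1 in the group-by and join keys instead of being folded back into the literal. The following is a minimal, generic Java sketch of the folding step that is now skipped; Expr, Const, and ColRef are illustrative names, not Hive's internal classes.

import java.util.List;

public class ConstPropSketch {
    interface Expr {}
    // A literal in the plan, e.g. what prints as "1 (type: int)".
    record Const(int value) implements Expr {}
    // A projected column; boundConstant is non-null when the column is known
    // to carry a literal (as _col1 does in the plans below).
    record ColRef(String name, Const boundConstant) implements Expr {}

    // ConstantPropagate: rewrite a column known to carry a literal back into
    // the literal itself.
    static Expr propagate(Expr e) {
        if (e instanceof ColRef c && c.boundConstant() != null) {
            return c.boundConstant();
        }
        return e;
    }

    public static void main(String[] args) {
        List<Expr> joinKeys =
            List.of(new ColRef("_col0", null), new ColRef("_col1", new Const(1)));
        // With the rewrite applied, the second key folds to Const[value=1],
        // matching the old "_col0 (type: int), 1 (type: int)" plans.
        System.out.println(joinKeys.stream().map(ConstPropSketch::propagate).toList());
        // With the rewrite skipped, both keys stay column references,
        // matching the new "_col0 (type: int), _col1 (type: int)" plans.
        System.out.println(joinKeys);
    }
}
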


http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
index 20f79c1..762837b 100644
--- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
@@ -469,18 +469,18 @@ STAGE PLANS:
               predicate: ((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null) (type: boolean)
               Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: l_orderkey (type: int)
-                outputColumnNames: _col0
+                expressions: l_orderkey (type: int), 1 (type: int)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: int), 1 (type: int)
+                  keys: _col0 (type: int), _col1 (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                   HashTable Sink Operator
                     keys:
                       0 _col0 (type: int), 1 (type: int)
-                      1 _col0 (type: int), 1 (type: int)
+                      1 _col0 (type: int), _col1 (type: int)
 
   Stage: Stage-8
     Map Reduce
@@ -500,7 +500,7 @@ STAGE PLANS:
                        Left Semi Join 0 to 1
                   keys:
                     0 _col0 (type: int), 1 (type: int)
-                    1 _col0 (type: int), 1 (type: int)
+                    1 _col0 (type: int), _col1 (type: int)
                   outputColumnNames: _col1, _col2
                   Statistics: Num rows: 55 Data size: 6598 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_null_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_null_projection.q.out b/ql/src/test/results/clientpositive/vector_null_projection.q.out
index 7517cc2..2e75731 100644
--- a/ql/src/test/results/clientpositive/vector_null_projection.q.out
+++ b/ql/src/test/results/clientpositive/vector_null_projection.q.out
@@ -111,9 +111,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Reduce Output Operator
-                      key expressions: null (type: void)
+                      key expressions: _col0 (type: void)
                       sort order: +
-                      Map-reduce partition columns: null (type: void)
+                      Map-reduce partition columns: _col0 (type: void)
                       Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
           TableScan
             alias: b
@@ -130,13 +130,13 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Reduce Output Operator
-                      key expressions: null (type: void)
+                      key expressions: _col0 (type: void)
                       sort order: +
-                      Map-reduce partition columns: null (type: void)
+                      Map-reduce partition columns: _col0 (type: void)
                       Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
       Reduce Operator Tree:
         Group By Operator
-          keys: null (type: void)
+          keys: KEY._col0 (type: void)
           mode: mergepartial
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_number_compare_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_number_compare_projection.q.out b/ql/src/test/results/clientpositive/vector_number_compare_projection.q.out
index 85f7cc8..4f1a26c 100644
--- a/ql/src/test/results/clientpositive/vector_number_compare_projection.q.out
+++ b/ql/src/test/results/clientpositive/vector_number_compare_projection.q.out
@@ -547,15 +547,15 @@ POSTHOOK: Input: default@vectortab2k
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@scratch_null
 POSTHOOK: Lineage: scratch_null.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: scratch_null.b_null EXPRESSION []
+POSTHOOK: Lineage: scratch_null.b_null SIMPLE []
 POSTHOOK: Lineage: scratch_null.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: scratch_null.bo_null EXPRESSION []
+POSTHOOK: Lineage: scratch_null.bo_null SIMPLE []
 POSTHOOK: Lineage: scratch_null.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: scratch_null.i_null EXPRESSION []
+POSTHOOK: Lineage: scratch_null.i_null SIMPLE []
 POSTHOOK: Lineage: scratch_null.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: scratch_null.si_null EXPRESSION []
+POSTHOOK: Lineage: scratch_null.si_null SIMPLE []
 POSTHOOK: Lineage: scratch_null.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: scratch_null.t_null EXPRESSION []
+POSTHOOK: Lineage: scratch_null.t_null SIMPLE []
 t	si	i	b	bo	t_null	si_null	i_null	b_null	bo_null
 PREHOOK: query: -- The nulled columns ought to create repeated null VectorizedRowBatch for those columns.
 CREATE TABLE vectortab2k_orc_null STORED AS ORC AS SELECT * FROM scratch_null

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
index 93ab473..1e8ce11 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -65,7 +65,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -81,13 +81,13 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE []
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
index c8001e0..47beffd 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -54,7 +54,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
-POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
@@ -76,12 +76,12 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
-POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
index 4f1a98d..a0c4709 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -61,7 +61,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.
 POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
@@ -81,9 +81,9 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
index a32f585..eaf419c 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2b.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -65,7 +65,7 @@ POSTHOOK: Lineage: small_alltypesorc3b.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -81,13 +81,13 @@ POSTHOOK: Lineage: small_alltypesorc4b.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE []
 PREHOOK: query: select * from small_alltypesorc1b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1b

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/vector_outer_join5.q.out
index 1b09fda..4c2d0c0 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join5.q.out
@@ -561,7 +561,7 @@ STAGE PLANS:
               HashTable Sink Operator
                 keys:
                   0 UDFToLong(_col1) (type: bigint)
-                  1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                  1 (_col0 pmod 8) (type: bigint)
         $hdt$_2:s 
           TableScan
             alias: s
@@ -590,7 +590,7 @@ STAGE PLANS:
                      Left Outer Join0 to 1
                 keys:
                   0 UDFToLong(_col1) (type: bigint)
-                  1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                  1 (_col0 pmod 8) (type: bigint)
                 outputColumnNames: _col0
                 Statistics: Num rows: 6663 Data size: 53310 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator
@@ -1212,7 +1212,7 @@ STAGE PLANS:
               HashTable Sink Operator
                 keys:
                   0 UDFToLong(_col1) (type: bigint)
-                  1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                  1 (_col0 pmod 8) (type: bigint)
         $hdt$_2:s 
           TableScan
             alias: s
@@ -1241,7 +1241,7 @@ STAGE PLANS:
                      Left Outer Join0 to 1
                 keys:
                   0 UDFToLong(_col1) (type: bigint)
-                  1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                  1 (_col0 pmod 8) (type: bigint)
                 outputColumnNames: _col0
                 Statistics: Num rows: 6663 Data size: 53310 Basic stats: COMPLETE Column stats: NONE
                 Map Join Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
index 7691dda..e83eeef 100644
--- a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
@@ -2294,7 +2294,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1)) and cboolean2 is not null) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
+              predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cstring1 (type: string), ctimestamp1 (type: timestamp), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
@@ -2630,7 +2630,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and (cboolean1 >= 1) and cstring1 is not null) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null)) and cboolean1 is not null) (type: boolean)
+              predicate: ((((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null)) and cboolean1 is not null) (type: boolean)
               Statistics: Num rows: 10239 Data size: 2201421 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)


[62/66] [abbrv] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
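
Context for the diffs below: COLUMN_STATS_ACCURATE is serialized as a JSON map, and the key order in the old golden files reflected HashMap iteration order, which is unspecified and changed between JDK 7 and JDK 8; the regenerated expected output consistently puts BASIC_STATS before COLUMN_STATS. A small Java sketch of the effect, with a sorted map as one way to get a deterministic order (the map contents here are illustrative, not Hive's serialization code):

import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class StatsKeyOrder {
    public static void main(String[] args) {
        // HashMap iteration order is unspecified and differs across JDK
        // versions, so text built from it (BASIC_STATS vs. COLUMN_STATS
        // first) is not stable across JDK 7 and JDK 8.
        Map<String, String> unstable = new HashMap<>();
        unstable.put("COLUMN_STATS", "{\"key\":\"true\",\"value\":\"true\"}");
        unstable.put("BASIC_STATS", "\"true\"");
        System.out.println(unstable); // order may vary with the JDK

        // A sorted map gives one canonical order on every JDK; alphabetically
        // BASIC_STATS precedes COLUMN_STATS, matching the updated golden files.
        Map<String, String> stable = new TreeMap<>(unstable);
        System.out.println(stable); // BASIC_STATS always first
    }
}
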
http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/sample8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample8.q.out b/ql/src/test/results/clientpositive/spark/sample8.q.out
index e847fa5..59807de 100644
--- a/ql/src/test/results/clientpositive/spark/sample8.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample8.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -126,7 +126,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -172,7 +172,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -218,7 +218,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -264,7 +264,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/stats0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats0.q.out b/ql/src/test/results/clientpositive/spark/stats0.q.out
index 0b14e21..491b4d0 100644
--- a/ql/src/test/results/clientpositive/spark/stats0.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats0.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -94,7 +94,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1388,7 +1388,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1408,7 +1408,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
index 032b659..ec65619 100644
--- a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
@@ -230,7 +230,7 @@ Database:           	default
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	6                   
 	rawDataSize         	71                  
@@ -271,7 +271,7 @@ Database:           	default
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	49                  

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
index d6df85a..91e6cfb 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
@@ -73,8 +73,8 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 10 (PARTITION-LEVEL SORT, 1), Reducer 9 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Map 7 (PARTITION-LEVEL SORT, 4), Reducer 2 (PARTITION-LEVEL SORT, 4)
-        Reducer 5 <- Map 11 (PARTITION-LEVEL SORT, 4), Map 6 (PARTITION-LEVEL SORT, 4)
+        Reducer 3 <- Map 7 (PARTITION-LEVEL SORT, 2), Reducer 2 (PARTITION-LEVEL SORT, 2)
+        Reducer 5 <- Map 11 (PARTITION-LEVEL SORT, 2), Map 6 (PARTITION-LEVEL SORT, 2)
         Reducer 9 <- Map 8 (GROUP, 1)
         Reducer 4 <- Reducer 3 (SORT, 1)
 #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out b/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out
index 57cb338..0dfd7d0 100644
--- a/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out
+++ b/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out
@@ -79,7 +79,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -125,7 +125,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -171,7 +171,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -217,7 +217,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out b/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out
index 2dfbd1c..3959df6 100644
--- a/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out
+++ b/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out
@@ -81,7 +81,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -127,7 +127,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
index 0459d93..5762865 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
@@ -119,7 +119,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP, 4)
+        Reducer 2 <- Map 1 (GROUP, 2)
         Reducer 3 <- Reducer 2 (SORT, 1)
 #### A masked pattern was here ####
       Vertices:

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/stats0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats0.q.out b/ql/src/test/results/clientpositive/stats0.q.out
index bbe38c1..97d66e7 100644
--- a/ql/src/test/results/clientpositive/stats0.q.out
+++ b/ql/src/test/results/clientpositive/stats0.q.out
@@ -71,7 +71,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -91,7 +91,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1384,7 +1384,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1404,7 +1404,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/stats_invalidation.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_invalidation.q.out b/ql/src/test/results/clientpositive/stats_invalidation.q.out
index d24fdc3..d822f4f 100644
--- a/ql/src/test/results/clientpositive/stats_invalidation.q.out
+++ b/ql/src/test/results/clientpositive/stats_invalidation.q.out
@@ -44,7 +44,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_only_null.q.out b/ql/src/test/results/clientpositive/stats_only_null.q.out
index 032f6c8..b47fe64 100644
--- a/ql/src/test/results/clientpositive/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/stats_only_null.q.out
@@ -218,7 +218,7 @@ Database:           	default
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	6                   
 	rawDataSize         	71                  
@@ -259,7 +259,7 @@ Database:           	default
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	49                  

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/bucket3.q.out b/ql/src/test/results/clientpositive/tez/bucket3.q.out
index 1532edc..250d03d 100644
--- a/ql/src/test/results/clientpositive/tez/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/tez/bucket3.q.out
@@ -59,7 +59,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -79,7 +79,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/bucket4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/bucket4.q.out b/ql/src/test/results/clientpositive/tez/bucket4.q.out
index 4291e44..b14c672 100644
--- a/ql/src/test/results/clientpositive/tez/bucket4.q.out
+++ b/ql/src/test/results/clientpositive/tez/bucket4.q.out
@@ -56,7 +56,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -76,7 +76,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/ctas.q.out b/ql/src/test/results/clientpositive/tez/ctas.q.out
index d92a446..9dffc0b 100644
--- a/ql/src/test/results/clientpositive/tez/ctas.q.out
+++ b/ql/src/test/results/clientpositive/tez/ctas.q.out
@@ -742,7 +742,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -762,7 +762,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out b/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out
index fb71214..c1717e3 100644
--- a/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/tez/disable_merge_for_bucketing.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -75,7 +75,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
index 316c914..2c2b2cf 100644
--- a/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
@@ -96,7 +96,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -142,7 +142,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -188,7 +188,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -234,7 +234,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -307,7 +307,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -327,7 +327,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -378,7 +378,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -398,7 +398,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out b/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
index 2cae5ce..b4423d4 100644
--- a/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
@@ -183,7 +183,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -203,7 +203,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -257,7 +257,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -302,7 +302,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -347,7 +347,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -392,7 +392,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -560,7 +560,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -580,7 +580,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -634,7 +634,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -680,7 +680,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -726,7 +726,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -772,7 +772,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -952,7 +952,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -972,7 +972,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1026,7 +1026,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1071,7 +1071,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1116,7 +1116,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1161,7 +1161,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1344,7 +1344,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1364,7 +1364,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1410,7 +1410,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1430,7 +1430,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1566,7 +1566,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1586,7 +1586,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1633,7 +1633,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1653,7 +1653,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1774,7 +1774,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1794,7 +1794,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/sample1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/sample1.q.out b/ql/src/test/results/clientpositive/tez/sample1.q.out
index 2120a1ff..882621b 100644
--- a/ql/src/test/results/clientpositive/tez/sample1.q.out
+++ b/ql/src/test/results/clientpositive/tez/sample1.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/schema_evol_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/schema_evol_stats.q.out b/ql/src/test/results/clientpositive/tez/schema_evol_stats.q.out
index d396a61..af541aa 100644
--- a/ql/src/test/results/clientpositive/tez/schema_evol_stats.q.out
+++ b/ql/src/test/results/clientpositive/tez/schema_evol_stats.q.out
@@ -109,7 +109,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	40                  
@@ -150,7 +150,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	56                  
@@ -305,7 +305,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	384                 
@@ -346,7 +346,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	732                 

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/stats_only_null.q.out b/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
index f4cc11e..8c17509 100644
--- a/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
@@ -232,7 +232,7 @@ Database:           	default
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	6                   
 	rawDataSize         	71                  
@@ -273,7 +273,7 @@ Database:           	default
 Table:              	stats_null_part     	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	49                  

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/transform_ppr1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/transform_ppr1.q.out b/ql/src/test/results/clientpositive/tez/transform_ppr1.q.out
index 5d7374f..50deff6 100644
--- a/ql/src/test/results/clientpositive/tez/transform_ppr1.q.out
+++ b/ql/src/test/results/clientpositive/tez/transform_ppr1.q.out
@@ -80,7 +80,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -126,7 +126,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -172,7 +172,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -218,7 +218,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/tez/transform_ppr2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/transform_ppr2.q.out b/ql/src/test/results/clientpositive/tez/transform_ppr2.q.out
index 4d74124..2b1abb9 100644
--- a/ql/src/test/results/clientpositive/tez/transform_ppr2.q.out
+++ b/ql/src/test/results/clientpositive/tez/transform_ppr2.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -128,7 +128,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/transform_ppr1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/transform_ppr1.q.out b/ql/src/test/results/clientpositive/transform_ppr1.q.out
index 8c58139..f15646a 100644
--- a/ql/src/test/results/clientpositive/transform_ppr1.q.out
+++ b/ql/src/test/results/clientpositive/transform_ppr1.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -120,7 +120,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -166,7 +166,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -212,7 +212,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/transform_ppr2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/transform_ppr2.q.out b/ql/src/test/results/clientpositive/transform_ppr2.q.out
index 8e36abd..db99985 100644
--- a/ql/src/test/results/clientpositive/transform_ppr2.q.out
+++ b/ql/src/test/results/clientpositive/transform_ppr2.q.out
@@ -76,7 +76,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -122,7 +122,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/udf_explode.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_explode.q.out b/ql/src/test/results/clientpositive/udf_explode.q.out
index ea12e80..bd68e96 100644
--- a/ql/src/test/results/clientpositive/udf_explode.q.out
+++ b/ql/src/test/results/clientpositive/udf_explode.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -102,7 +102,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -266,7 +266,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -286,7 +286,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/udtf_explode.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udtf_explode.q.out b/ql/src/test/results/clientpositive/udtf_explode.q.out
index e067a0a..4f8bd5e 100644
--- a/ql/src/test/results/clientpositive/udtf_explode.q.out
+++ b/ql/src/test/results/clientpositive/udtf_explode.q.out
@@ -79,7 +79,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -99,7 +99,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -331,7 +331,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -351,7 +351,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/union_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_ppr.q.out b/ql/src/test/results/clientpositive/union_ppr.q.out
index c5b1193..9763679 100644
--- a/ql/src/test/results/clientpositive/union_ppr.q.out
+++ b/ql/src/test/results/clientpositive/union_ppr.q.out
@@ -90,7 +90,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -136,7 +136,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'


[05/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
index 8b3d353..e3ed731 100644
--- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
@@ -205,18 +205,18 @@ STAGE PLANS:
                     predicate: ((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: l_orderkey (type: int)
-                      outputColumnNames: _col0
+                      expressions: l_orderkey (type: int), 1 (type: int)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int), 1 (type: int)
+                        keys: _col0 (type: int), _col1 (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                         Spark HashTable Sink Operator
                           keys:
                             0 _col0 (type: int), 1 (type: int)
-                            1 _col0 (type: int), 1 (type: int)
+                            1 _col0 (type: int), _col1 (type: int)
             Local Work:
               Map Reduce Local Work
         Map 3 
@@ -273,7 +273,7 @@ STAGE PLANS:
                              Left Semi Join 0 to 1
                         keys:
                           0 _col0 (type: int), 1 (type: int)
-                          1 _col0 (type: int), 1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
                         outputColumnNames: _col1, _col2
                         input vertices:
                           1 Map 2

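The vector_mapjoin_reduce hunks above show the behavior change from HIVE-13068 itself: with Hive's ConstantPropagate pass disabled after CBO, the literal 1 survives as a projected column (_col1), and the semi-join key on side 1 references _col1 instead of the inlined constant. Both plans return the same rows; propagation merely replaces a column reference whose value is a known constant with the literal itself. A minimal sketch of that rewrite, with hypothetical types (not Hive's ConstantPropagate code):

    import java.util.List;
    import java.util.Map;

    public class ConstantPropagationSketch {
        // Hypothetical expression: either a column reference or an int literal.
        sealed interface Expr permits Col, Lit {}
        record Col(String name) implements Expr {}
        record Lit(int value) implements Expr {}

        // Replace column references whose source is a known constant.
        static Expr propagate(Expr e, Map<String, Integer> constants) {
            if (e instanceof Col c && constants.containsKey(c.name())) {
                return new Lit(constants.get(c.name()));
            }
            return e;
        }

        public static void main(String[] args) {
            // _col1 is known to be the constant 1, as in the Select Operator above.
            Map<String, Integer> constants = Map.of("_col1", 1);
            List<Expr> joinKeys = List.of(new Col("_col0"), new Col("_col1"));
            // With propagation: [_col0, 1]; without: [_col0, _col1]. Same rows either way.
            joinKeys.forEach(k -> System.out.println(propagate(k, constants)));
        }
    }
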
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
index 50134d9..1d13e69 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -65,7 +65,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -81,13 +81,13 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE []
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a

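The lineage changes above are a visible side effect of the same constant handling: in a CTAS guarded by a predicate such as "where cint is null and ctinyint is null", the planner can fold the selected cint and ctinyint to constants, and a constant expression carries no source columns, so the POSTHOOK lineage entry collapses to SIMPLE []. A minimal sketch of that rule, with a hypothetical helper (not Hive's LineageInfo implementation):

    import java.util.List;

    public class LineageSketch {
        // Dependencies of a projected expression: a plain column reference maps
        // back to its source column; a folded constant maps back to nothing.
        static List<String> sources(String expr, boolean foldedToConstant) {
            return foldedToConstant ? List.of() : List.of("alltypesorc." + expr);
        }

        public static void main(String[] args) {
            System.out.println(sources("csmallint", false)); // [alltypesorc.csmallint]
            System.out.println(sources("cint", true));       // [] -- shown above as SIMPLE []
        }
    }
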
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
index dba7cbd..d6c8f69 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -54,7 +54,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
-POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
@@ -76,12 +76,12 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
-POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
index 1c3b7a6..5a1453f 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -61,7 +61,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.
 POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
@@ -81,9 +81,9 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
index cc1db38..7540d56 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2b.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -65,7 +65,7 @@ POSTHOOK: Lineage: small_alltypesorc3b.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -81,13 +81,13 @@ POSTHOOK: Lineage: small_alltypesorc4b.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE []
 PREHOOK: query: select * from small_alltypesorc1b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1b

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
index 8c065f2..c30984d 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
@@ -596,7 +596,7 @@ STAGE PLANS:
                     Spark HashTable Sink Operator
                       keys:
                         0 UDFToLong(_col1) (type: bigint)
-                        1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
             Execution mode: vectorized
             Local Work:
               Map Reduce Local Work
@@ -637,7 +637,7 @@ STAGE PLANS:
                            Left Outer Join0 to 1
                       keys:
                         0 UDFToLong(_col1) (type: bigint)
-                        1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 3
@@ -1300,7 +1300,7 @@ STAGE PLANS:
                     Spark HashTable Sink Operator
                       keys:
                         0 UDFToLong(_col1) (type: bigint)
-                        1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
             Execution mode: vectorized
             Local Work:
               Map Reduce Local Work
@@ -1341,7 +1341,7 @@ STAGE PLANS:
                            Left Outer Join0 to 1
                       keys:
                         0 UDFToLong(_col1) (type: bigint)
-                        1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 3

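A note on the four hunks above: the only change is in the second join key, where the explicit cast in (_col0 pmod UDFToLong(8)) is folded away, leaving (_col0 pmod 8); the literal 8 is still treated as a bigint, so the keys are equivalent. A minimal sketch of a query yielding this key shape (table and column names here are illustrative, not taken from the test):

    -- hypothetical tables; the join key pmod(b.id, 8) prints in the plan
    -- as (_col0 pmod 8) once the constant cast is folded
    SELECT a.id
    FROM t_big a
    LEFT OUTER JOIN t_small b
      ON a.id = pmod(b.id, 8);
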
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
index 7caa50d..78b5a7e 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
@@ -2345,7 +2345,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1)) and cboolean2 is not null) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
+                    predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cstring1 (type: string), ctimestamp1 (type: timestamp), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
@@ -2678,7 +2678,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and (cboolean1 >= 1) and cstring1 is not null) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null)) and cboolean1 is not null) (type: boolean)
+                    predicate: ((((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null)) and cboolean1 is not null) (type: boolean)
                     Statistics: Num rows: 10239 Data size: 314333 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)

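The two predicate hunks above only reorder conjuncts inside a single OR branch (cboolean2 is not null moves ahead of the regexp test in the first, and cstring1 is not null ahead of (cboolean1 >= 1) in the second); since AND is commutative, the filters are logically unchanged. Schematically, over the same alltypesorc columns:

    -- equivalent to the pre-patch order "regexp first, null-check second";
    -- only the conjunct order in the compiled filter differs
    SELECT count(*) FROM alltypesorc
    WHERE cboolean2 IS NOT NULL AND cstring1 regexp '.*ss';
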
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/subquery_notin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out
index c600b7f..fed1f89 100644
--- a/ql/src/test/results/clientpositive/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/subquery_notin.q.out
@@ -29,32 +29,32 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '2') and key is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+              Filter Operator
+                predicate: false (type: boolean)
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
                 table:
@@ -79,7 +79,7 @@ STAGE PLANS:
           TableScan
             Reduce Output Operator
               sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -285,7 +285,7 @@ POSTHOOK: Input: default@src
 199	val_199
 199	val_199
 2	val_2
-Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[26][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, corr
 explain
 select p_mfgr, b.p_name, p_size 
@@ -530,7 +530,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[26][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select p_mfgr, b.p_name, p_size 
 from part b 
 where b.p_name not in 
@@ -569,7 +569,7 @@ Manufacturer#4	almond azure aquamarine papaya violet	12
 Manufacturer#5	almond antique blue firebrick mint	31
 Manufacturer#5	almond aquamarine dodger light gainsboro	46
 Manufacturer#5	almond azure blanched chiffon midnight	23
-Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- agg, non corr
 explain
 select p_name, p_size 
@@ -851,7 +851,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select p_name, p_size 
 from 
 part where part.p_size not in 
@@ -898,7 +898,7 @@ almond aquamarine sandy cyan gainsboro	18
 almond aquamarine yellow dodger mint	7
 almond azure aquamarine papaya violet	12
 almond azure blanched chiffon midnight	23
-Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- agg, corr
 explain
 select p_mfgr, p_name, p_size 
@@ -1212,7 +1212,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[37][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select p_mfgr, p_name, p_size 
 from part b where b.p_size not in 
   (select min(p_size) 
@@ -1253,7 +1253,7 @@ Manufacturer#5	almond antique medium spring khaki	6
 Manufacturer#5	almond azure blanched chiffon midnight	23
 Manufacturer#5	almond antique blue firebrick mint	31
 Manufacturer#5	almond aquamarine dodger light gainsboro	46
-Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, non corr, Group By in Parent Query
 select li.l_partkey, count(*) 
 from lineitem li 
@@ -1452,7 +1452,7 @@ POSTHOOK: Input: default@src
 POSTHOOK: Input: default@t1_v
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2_v
-Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
 select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
@@ -1556,17 +1556,17 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key < '11') and (CASE WHEN ((key > '104')) THEN (null) ELSE (key) END < '11')) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              predicate: ((key < '11') and CASE WHEN ((key > '104')) THEN (null) ELSE ((key < '11')) END) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: CASE WHEN ((key > '104')) THEN (null) ELSE (key) END (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -1597,7 +1597,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
 PREHOOK: type: QUERY

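Two substantive changes sit among the renumbered warnings above (JOIN[27] becomes JOIN[26], JOIN[38] becomes JOIN[37], and so on, consistent with one fewer operator being allocated during planning). First, the contradictory filter in the NOT IN null-check branch, ((key > '2') and key is null), now folds to predicate: false, and the src statistics are reported with Column stats: COMPLETE. Second, the CASE predicate is rewritten so the outer comparison moves into the ELSE branch: (CASE WHEN ((key > '104')) THEN (null) ELSE (key) END < '11') becomes CASE WHEN ((key > '104')) THEN (null) ELSE ((key < '11')) END, which widens the row estimate from 55 to 83. A sketch of the branch that folds to false:

    -- the null-check side of a NOT IN rewrite: once key > '2' is asserted,
    -- key IS NULL cannot also hold, so the whole filter folds to false
    SELECT count(*) FROM src WHERE key > '2' AND key IS NULL;
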
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
index 5114296..793b8be 100644
--- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
@@ -78,7 +78,7 @@ STAGE PLANS:
           TableScan
             Reduce Output Operator
               sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -149,32 +149,32 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '12') and key is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+              Filter Operator
+                predicate: false (type: boolean)
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
                   aggregations: count()
                   mode: hash
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   Reduce Output Operator
                     sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                     value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
                 table:
@@ -188,7 +188,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: -- non agg, corr
 explain
 select b.p_mfgr, min(p_retailprice) 
@@ -445,7 +445,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
 from part b 
 group by b.p_mfgr
@@ -620,24 +620,24 @@ STAGE PLANS:
               Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: p_retailprice (type: double)
-                outputColumnNames: p_retailprice
+                outputColumnNames: _col1
                 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(p_retailprice), min(p_retailprice)
+                  aggregations: max(_col1), min(_col1)
                   keys: null (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: null (type: string)
+                    key expressions: _col0 (type: string)
                     sort order: +
-                    Map-reduce partition columns: null (type: string)
+                    Map-reduce partition columns: _col0 (type: string)
                     Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: double), _col2 (type: double)
       Reduce Operator Tree:
         Group By Operator
           aggregations: max(VALUE._col0), min(VALUE._col1)
-          keys: null (type: string)
+          keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE

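In the last hunk above, the hash-side Group By used to re-emit its literal grouping key downstream (keys: null (type: string), key expressions: null), and the merge side repeated the literal; both sides now carry the key as a forwarded column (_col0, then KEY._col0). As a hedged reconstruction (the test query itself is not visible in this hunk), a grouping key of that shape could come from something like:

    -- grouping on a null string constant; the plan prints the key as
    -- "null (type: string)" before the change and as _col0 after it
    SELECT max(p_retailprice), min(p_retailprice)
    FROM part
    GROUP BY cast(null AS string);
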
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
index 7853737..f9b1bfe 100644
--- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
+++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
@@ -781,7 +781,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[26][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, corr
 explain
 select p_mfgr, b.p_name, p_size 

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out
index fab919d..e10b256 100644
--- a/ql/src/test/results/clientpositive/subquery_views.q.out
+++ b/ql/src/test/results/clientpositive/subquery_views.q.out
@@ -111,8 +111,8 @@ where `b`.`key` not in
   from `default`.`src` `a` 	 	 
   where `b`.`value` = `a`.`value`  and `a`.`key` = `b`.`key` and `a`.`value` > 'val_11'	 	 
   ), tableType:VIRTUAL_VIEW)		 
-Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[42][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[40][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
 PREHOOK: query: explain
 select * 
 from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
@@ -420,8 +420,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[42][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[40][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
 PREHOOK: query: select * 
 from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/table_access_keys_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/table_access_keys_stats.q.out b/ql/src/test/results/clientpositive/table_access_keys_stats.q.out
index bbf24fd..ac33edf 100644
--- a/ql/src/test/results/clientpositive/table_access_keys_stats.q.out
+++ b/ql/src/test/results/clientpositive/table_access_keys_stats.q.out
@@ -77,7 +77,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_3
+Operator:GBY_2
 Table:default@t1
 Keys:key
 
@@ -90,7 +90,7 @@ PREHOOK: query: SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_3
+Operator:GBY_2
 Table:default@t1
 Keys:key,val
 
@@ -104,7 +104,7 @@ PREHOOK: query: SELECT key, 1, val, 2, count(1) FROM T1 GROUP BY key, 1, val, 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_3
+Operator:GBY_2
 Table:default@t1
 Keys:key,val
 
@@ -413,7 +413,7 @@ PREHOOK: Input: default@t2
 #### A masked pattern was here ####
 Operator:JOIN_8
 Table:default@t1
-Keys:val,key
+Keys:key
 Table:default@t2
 Keys:key
 
@@ -439,7 +439,7 @@ Operator:JOIN_8
 Table:default@t1
 Keys:key
 Table:default@t2
-Keys:val,key
+Keys:key
 
 PREHOOK: query: -- no mapping on functions
 SELECT *
@@ -474,7 +474,7 @@ PREHOOK: Input: default@t2
 #### A masked pattern was here ####
 Operator:JOIN_8
 Table:default@t1
-Keys:val,key
+Keys:key
 Table:default@t2
 Keys:key
 
@@ -505,7 +505,7 @@ PREHOOK: Input: default@t3
 #### A masked pattern was here ####
 Operator:JOIN_8
 Table:default@t1
-Keys:val,key
+Keys:key
 Table:default@t2
 Keys:key
 
@@ -518,6 +518,7 @@ Keys:val
 13.0	1
 17.0	1
 46.0	1
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: -- join followed by join
 SELECT *
 FROM
@@ -543,7 +544,7 @@ PREHOOK: Input: default@t3
 #### A masked pattern was here ####
 Operator:JOIN_8
 Table:default@t1
-Keys:val,key
+Keys:key
 Table:default@t2
 Keys:key
 

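Besides the GBY_3 to GBY_2 renumbering, the join hunks above narrow the access keys reported for one side from val,key to just key, and one query picks up a new cross-product warning for its Stage-3 shuffle join. The group-by queries affected are the ones with constants in the grouping key, visible in the diff, e.g.:

    -- constants 1 and 2 in the grouping key contribute no table access keys;
    -- only key and val are reported for default@t1
    SELECT key, 1, val, 2, count(1) FROM T1 GROUP BY key, 1, val, 2;
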
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/auto_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/auto_join_filters.q.out b/ql/src/test/results/clientpositive/tez/auto_join_filters.q.out
index 1559d4b..36f719b 100644
--- a/ql/src/test/results/clientpositive/tez/auto_join_filters.q.out
+++ b/ql/src/test/results/clientpositive/tez/auto_join_filters.q.out
@@ -14,7 +14,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE my
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@myinput1
-Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -300,7 +300,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table sm
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_input2
-Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/auto_join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/auto_join_nulls.q.out b/ql/src/test/results/clientpositive/tez/auto_join_nulls.q.out
index 5b68bb7..5984e8f 100644
--- a/ql/src/test/results/clientpositive/tez/auto_join_nulls.q.out
+++ b/ql/src/test/results/clientpositive/tez/auto_join_nulls.q.out
@@ -14,7 +14,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE my
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@myinput1
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -24,7 +24,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 13630578
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_12.q.out
index dfcf7ea..3b59cb8 100644
--- a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_12.q.out
+++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_12.q.out
@@ -138,7 +138,7 @@ POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3out
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@bucket_medium@ds=2008-04-08
-Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Map 3' is a cross product
+Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Map 3' is a cross product
 PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
@@ -559,7 +559,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Map 3' is a cross product
+Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Map 3' is a cross product
 PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket_big

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out b/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out
index 636410a..0f3bd63 100644
--- a/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out
@@ -261,7 +261,7 @@ Stage-0
     Stage-1
       Reducer 2
       File Output Operator [FS_12]
-        Merge Join Operator [MERGEJOIN_18] (rows=5 width=22)
+        Merge Join Operator [MERGEJOIN_17] (rows=5 width=22)
           Conds:RS_8._col3, true=RS_9._col0, _col1(Left Semi),Output:["_col0","_col1","_col2"]
         <-Map 1 [SIMPLE_EDGE]
           SHUFFLE [RS_8]
@@ -269,7 +269,7 @@ Stage-0
             Select Operator [SEL_2] (rows=5 width=20)
               Output:["_col0","_col1","_col2","_col3"]
               Filter Operator [FIL_15] (rows=5 width=20)
-                predicate:(((dimid = 100) = true) and (dimid <> 100) and (dimid = 100) is not null)
+                predicate:(((dimid = 100) = true) and (dimid <> 100))
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
@@ -279,8 +279,8 @@ Stage-0
               Output:["_col0","_col1"],keys:_col0, _col1
               Select Operator [SEL_5] (rows=2 width=3)
                 Output:["_col0","_col1"]
-                Filter Operator [FIL_17] (rows=2 width=3)
-                  predicate:(((id = 100) = true) and (id <> 100) and (id = 100) is not null)
+                Filter Operator [FIL_16] (rows=2 width=3)
+                  predicate:(((id = 100) = true) and (id <> 100))
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 
@@ -309,7 +309,7 @@ Stage-0
     Stage-1
       Reducer 2
       File Output Operator [FS_12]
-        Merge Join Operator [MERGEJOIN_18] (rows=2 width=22)
+        Merge Join Operator [MERGEJOIN_17] (rows=2 width=22)
           Conds:RS_8._col3, true=RS_9._col0, _col1(Left Semi),Output:["_col0","_col1","_col2"]
         <-Map 1 [SIMPLE_EDGE]
           SHUFFLE [RS_8]
@@ -317,7 +317,7 @@ Stage-0
             Select Operator [SEL_2] (rows=2 width=20)
               Output:["_col0","_col1","_col2","_col3"]
               Filter Operator [FIL_15] (rows=2 width=20)
-                predicate:((dimid) IN (100, 200) and ((dimid = 100) = true) and (dimid = 100) is not null)
+                predicate:((dimid) IN (100, 200) and ((dimid = 100) = true))
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
@@ -327,8 +327,8 @@ Stage-0
               Output:["_col0","_col1"],keys:_col0, _col1
               Select Operator [SEL_5] (rows=1 width=3)
                 Output:["_col0","_col1"]
-                Filter Operator [FIL_17] (rows=1 width=3)
-                  predicate:((id) IN (100, 200) and ((id = 100) = true) and (id = 100) is not null)
+                Filter Operator [FIL_16] (rows=1 width=3)
+                  predicate:((id) IN (100, 200) and ((id = 100) = true))
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 
@@ -359,25 +359,26 @@ Stage-0
     Stage-1
       Reducer 2
       File Output Operator [FS_12]
-        Merge Join Operator [MERGEJOIN_18] (rows=2 width=22)
-          Conds:RS_8.200, true=RS_9.200, false(Left Semi),Output:["_col0","_col1","_col2"]
+        Merge Join Operator [MERGEJOIN_17] (rows=2 width=22)
+          Conds:RS_8.200, true=RS_9._col0, _col1(Left Semi),Output:["_col0","_col1","_col2"]
         <-Map 1 [SIMPLE_EDGE]
           SHUFFLE [RS_8]
             PartitionCols:200, true
             Select Operator [SEL_2] (rows=2 width=20)
               Output:["_col0","_col1","_col2"]
               Filter Operator [FIL_15] (rows=2 width=20)
-                predicate:(((dimid = 100) = true) and (dimid = 200) and (dimid = 100) is not null)
+                predicate:(((dimid = 100) = true) and (dimid = 200))
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
           SHUFFLE [RS_9]
-            PartitionCols:200, false
+            PartitionCols:_col0, _col1
             Group By Operator [GBY_7] (rows=1 width=3)
-              Output:["_col0","_col1"],keys:200, false
+              Output:["_col0","_col1"],keys:_col0, _col1
               Select Operator [SEL_5] (rows=1 width=3)
-                Filter Operator [FIL_17] (rows=1 width=3)
-                  predicate:(((id = 100) = true) and (id = 200) and (id = 100) is not null)
+                Output:["_col0","_col1"]
+                Filter Operator [FIL_16] (rows=1 width=3)
+                  predicate:(((id = 100) = true) and (id = 200))
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 
@@ -406,25 +407,26 @@ Stage-0
     Stage-1
       Reducer 2
       File Output Operator [FS_12]
-        Merge Join Operator [MERGEJOIN_18] (rows=2 width=22)
-          Conds:RS_8.100, true=RS_9.100, true(Left Semi),Output:["_col0","_col1","_col2"]
+        Merge Join Operator [MERGEJOIN_17] (rows=2 width=22)
+          Conds:RS_8.100, true=RS_9._col0, _col1(Left Semi),Output:["_col0","_col1","_col2"]
         <-Map 1 [SIMPLE_EDGE]
           SHUFFLE [RS_8]
             PartitionCols:100, true
             Select Operator [SEL_2] (rows=2 width=20)
               Output:["_col0","_col1","_col2"]
               Filter Operator [FIL_15] (rows=2 width=20)
-                predicate:(((dimid = 100) = true) and (dimid = 100) and (dimid = 100) is not null)
+                predicate:(((dimid = 100) = true) and (dimid = 100))
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
           SHUFFLE [RS_9]
-            PartitionCols:100, true
+            PartitionCols:_col0, _col1
             Group By Operator [GBY_7] (rows=1 width=3)
-              Output:["_col0","_col1"],keys:100, true
+              Output:["_col0","_col1"],keys:_col0, _col1
               Select Operator [SEL_5] (rows=1 width=3)
-                Filter Operator [FIL_17] (rows=1 width=3)
-                  predicate:(((id = 100) = true) and (id = 100) and (id = 100) is not null)
+                Output:["_col0","_col1"]
+                Filter Operator [FIL_16] (rows=1 width=3)
+                  predicate:(((id = 100) = true) and (id = 100))
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 
@@ -455,7 +457,7 @@ Stage-0
     Stage-1
       Reducer 2
       File Output Operator [FS_12]
-        Merge Join Operator [MERGEJOIN_18] (rows=5 width=22)
+        Merge Join Operator [MERGEJOIN_17] (rows=5 width=22)
           Conds:RS_8._col3, true=RS_9._col0, _col1(Left Semi),Output:["_col0","_col1","_col2"]
         <-Map 1 [SIMPLE_EDGE]
           SHUFFLE [RS_8]
@@ -463,7 +465,7 @@ Stage-0
             Select Operator [SEL_2] (rows=5 width=20)
               Output:["_col0","_col1","_col2","_col3"]
               Filter Operator [FIL_15] (rows=5 width=20)
-                predicate:(((dimid = 100) = true) and dimid is not null and (dimid = 100) is not null)
+                predicate:(((dimid = 100) = true) and dimid is not null)
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
@@ -473,8 +475,8 @@ Stage-0
               Output:["_col0","_col1"],keys:_col0, _col1
               Select Operator [SEL_5] (rows=2 width=3)
                 Output:["_col0","_col1"]
-                Filter Operator [FIL_17] (rows=2 width=3)
-                  predicate:(((id = 100) = true) and id is not null and (id = 100) is not null)
+                Filter Operator [FIL_16] (rows=2 width=3)
+                  predicate:(((id = 100) = true) and id is not null)
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 

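Every predicate hunk above drops the same trailing conjunct: (dimid = 100) is not null on the fact side and (id = 100) is not null on the other. The conjunct is redundant: dimid = 100 is a deterministic boolean expression, and once ((dimid = 100) = true) is asserted, the expression is necessarily non-null, so the filters are semantically unchanged. The semi-join sides also go back to partitioning on the grouped columns _col0, _col1 rather than on re-evaluated literals. The simplified filter reads, for example:

    -- FIL_15 after the change: the redundant "(dimid = 100) is not null"
    -- conjunct is gone; the remaining filter is equivalent
    SELECT id, val, val1
    FROM table1
    WHERE ((dimid = 100) = true) AND dimid <> 100;
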
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/cross_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cross_join.q.out b/ql/src/test/results/clientpositive/tez/cross_join.q.out
index 0fa801e..9d1fbcc 100644
--- a/ql/src/test/results/clientpositive/tez/cross_join.q.out
+++ b/ql/src/test/results/clientpositive/tez/cross_join.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- current
 explain select src.key from src join src src2
 PREHOOK: type: QUERY
@@ -64,7 +64,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- ansi cross join
 explain select src.key from src cross join src src2
 PREHOOK: type: QUERY
@@ -206,7 +206,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: explain select src.key from src join src src2
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select src.key from src join src src2
@@ -266,7 +266,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: explain select src.key from src cross join src src2
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select src.key from src cross join src src2

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
index 470590a..bc021f5 100644
--- a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
@@ -32,7 +32,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
 POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-Warning: Shuffle Join MERGEJOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join B
@@ -99,7 +99,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
+Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
@@ -204,7 +204,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[27][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[26][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from A join 
          (select d1.key 
           from B d1 join B d2 on d1.key = d2.key
@@ -331,8 +331,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 4' is a cross product
-Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[19][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 4' is a cross product
+Warning: Shuffle Join MERGEJOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1  group by d1.key) od1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1  group by d1.key) od1
@@ -442,7 +442,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 3' is a cross product
+Warning: Shuffle Join MERGEJOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: explain select * from 
 (select A.key from A  group by key) ss join
 (select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
index 68df37d..b349de5 100644
--- a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
@@ -32,7 +32,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
 POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join B
@@ -95,7 +95,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Map 3' is a cross product
+Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Map 3' is a cross product
 PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
@@ -191,7 +191,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[26][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: explain select * from A join 
          (select d1.key 
           from B d1 join B d2 on d1.key = d2.key 
@@ -310,8 +310,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Map 2' is a cross product
-Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 2' is a cross product
+Warning: Map Join MAPJOIN[20][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
@@ -413,7 +413,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[31][bigTable=?] in task 'Reducer 2' is a cross product
+Warning: Map Join MAPJOIN[30][bigTable=?] in task 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from 
 (select A.key from A group by key) ss join 
 (select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out b/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out
index bbe4296..369cd57 100644
--- a/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out
@@ -1,3 +1,4 @@
+Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 with q1 as (select * from src where key= '5')
 select a.key
@@ -23,21 +24,19 @@ Stage-0
       File Output Operator [FS_10]
         Select Operator [SEL_9] (rows=275 width=10)
           Output:["_col0"]
-          Merge Join Operator [MERGEJOIN_15] (rows=275 width=10)
-            Conds:RS_6.'5'=RS_7.'5'(Inner)
+          Merge Join Operator [MERGEJOIN_13] (rows=275 width=10)
+            Conds:(Inner)
           <-Map 1 [SIMPLE_EDGE]
             SHUFFLE [RS_6]
-              PartitionCols:'5'
               Select Operator [SEL_2] (rows=250 width=10)
-                Filter Operator [FIL_13] (rows=250 width=10)
+                Filter Operator [FIL_11] (rows=250 width=10)
                   predicate:(key = '5')
                   TableScan [TS_0] (rows=500 width=10)
                     default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
           <-Map 3 [SIMPLE_EDGE]
             SHUFFLE [RS_7]
-              PartitionCols:'5'
               Select Operator [SEL_5] (rows=250 width=10)
-                Filter Operator [FIL_14] (rows=250 width=10)
+                Filter Operator [FIL_12] (rows=250 width=10)
                   predicate:(key = '5')
                   TableScan [TS_3] (rows=500 width=10)
                     default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]

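The hunk above deserves a closer look (the cte_mat_2 diff below is identical): both CTE references are filtered to key = '5', so the equi-join key degenerates to a constant. The old plan still shuffled on the literal (Conds:RS_6.'5'=RS_7.'5' with PartitionCols:'5'); the new plan drops the degenerate key, emits Conds:(Inner), and correctly raises the cross-product warning added at the top of the file. A hedged completion of the query from the diff preamble (its join clause is cut off by the hunk header):

    WITH q1 AS (SELECT * FROM src WHERE key = '5')
    SELECT a.key
    FROM q1 a JOIN q1 b ON a.key = b.key;
    -- both sides are fixed to '5', so the equi-join is effectively a
    -- cross product of the matching rows
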
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out b/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out
index bbe4296..369cd57 100644
--- a/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out
@@ -1,3 +1,4 @@
+Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 with q1 as (select * from src where key= '5')
 select a.key
@@ -23,21 +24,19 @@ Stage-0
       File Output Operator [FS_10]
         Select Operator [SEL_9] (rows=275 width=10)
           Output:["_col0"]
-          Merge Join Operator [MERGEJOIN_15] (rows=275 width=10)
-            Conds:RS_6.'5'=RS_7.'5'(Inner)
+          Merge Join Operator [MERGEJOIN_13] (rows=275 width=10)
+            Conds:(Inner)
           <-Map 1 [SIMPLE_EDGE]
             SHUFFLE [RS_6]
-              PartitionCols:'5'
               Select Operator [SEL_2] (rows=250 width=10)
-                Filter Operator [FIL_13] (rows=250 width=10)
+                Filter Operator [FIL_11] (rows=250 width=10)
                   predicate:(key = '5')
                   TableScan [TS_0] (rows=500 width=10)
                     default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
           <-Map 3 [SIMPLE_EDGE]
             SHUFFLE [RS_7]
-              PartitionCols:'5'
               Select Operator [SEL_5] (rows=250 width=10)
-                Filter Operator [FIL_14] (rows=250 width=10)
+                Filter Operator [FIL_12] (rows=250 width=10)
                   predicate:(key = '5')
                   TableScan [TS_3] (rows=500 width=10)
                     default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]


[44/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
new file mode 100644
index 0000000..cf20851
--- /dev/null
+++ b/ql/src/test/results/clientpositive/outer_join_ppr.q.out
@@ -0,0 +1,683 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                null sort order: a
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                tag: 0
+                value expressions: _col1 (type: string)
+                auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), ds (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                null sort order: a
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                tag: 1
+                value expressions: _col1 (type: string), _col2 (type: string)
+                auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [$hdt$_0:a]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
+        /srcpart/ds=2008-04-09/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-09/hr=12 [$hdt$_1:b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Outer Join 0 to 1
+          filter mappings:
+            1 [0, 1]
+          filter predicates:
+            0 
+            1 {(VALUE._col1 = '2008-04-08')}
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0) and (UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean)
+            Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3
+                    columns.types string:string:string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+17	val_17	17	val_17
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+19	val_19	19	val_19
+PREHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  tag: 0
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                  tag: 1
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [$hdt$_0:a]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean)
+            Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3
+                    columns.types string:string:string:string
+                    escape.delim \
+                    hive.serialization.extend.additional.nesting.levels true
+                    serialization.escape.crlf true
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+17	val_17	17	val_17
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+19	val_19	19	val_19
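
The two EXPLAIN EXTENDED plans above differ only in where the partition
predicate on b.ds is written, and the golden outputs record the
consequence. With the predicate in the ON clause, the Truncated Path
list shows all four srcpart partitions being scanned, and the condition
survives only as a join-time entry under "filter predicates", since a
FULL OUTER JOIN must still emit the non-matching rows of b. With the
predicate in the WHERE clause, rows whose b side is NULL can never pass
the filter, so the optimizer prunes srcpart to the two ds=2008-04-08
partitions and plans a Right Outer Join instead. A minimal sketch of
the two shapes, assuming the standard src and srcpart test tables:

    -- Partition filter in the ON clause: no pruning; it is kept as a
    -- residual join-time filter to preserve FULL OUTER semantics.
    SELECT a.key, a.value, b.key, b.value
    FROM src a
    FULL OUTER JOIN srcpart b
      ON (a.key = b.key AND b.ds = '2008-04-08');

    -- Partition filter in the WHERE clause: unmatched a-side rows are
    -- filtered out anyway, so srcpart is pruned to the 2008-04-08
    -- partitions and the join degrades to a Right Outer Join.
    SELECT a.key, a.value, b.key, b.value
    FROM src a
    FULL OUTER JOIN srcpart b
      ON (a.key = b.key)
    WHERE b.ds = '2008-04-08';

With the additional key filters used by the test, both recorded result
sets happen to be the same twelve rows, but only the WHERE form allows
partition pruning.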

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out b/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
deleted file mode 100644
index 825e668..0000000
--- a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
+++ /dev/null
@@ -1,70 +0,0 @@
-PREHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS parquet_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parquet_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_table
-POSTHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_table
-POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table)avro_table.FieldSchema(name:avreau_col_1, type:map<string,string>, comment:), ]
-PREHOOK: query: SELECT * FROM parquet_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parquet_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_table
-#### A masked pattern was here ####
-{"key4":null,"key3":"val3"}
-{"key4":null,"key3":"val3"}
-{"key2":"val2","key1":null}
-{"key4":null,"key3":"val3"}
-{"key4":null,"key3":"val3"}
-PREHOOK: query: DROP TABLE avro_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: DROP TABLE avro_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: DROP TABLE parquet_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_table
-PREHOOK: Output: default@parquet_table
-POSTHOOK: query: DROP TABLE parquet_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_table
-POSTHOOK: Output: default@parquet_table

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/parquet_map_null.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.8.out b/ql/src/test/results/clientpositive/parquet_map_null.q.java1.8.out
deleted file mode 100644
index 1462cc2..0000000
--- a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.8.out
+++ /dev/null
@@ -1,70 +0,0 @@
-PREHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS parquet_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS parquet_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: database:default
-PREHOOK: Output: default@parquet_table
-POSTHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@parquet_table
-POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table)avro_table.FieldSchema(name:avreau_col_1, type:map<string,string>, comment:), ]
-PREHOOK: query: SELECT * FROM parquet_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@parquet_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM parquet_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@parquet_table
-#### A masked pattern was here ####
-{"key3":"val3","key4":null}
-{"key3":"val3","key4":null}
-{"key1":null,"key2":"val2"}
-{"key3":"val3","key4":null}
-{"key3":"val3","key4":null}
-PREHOOK: query: DROP TABLE avro_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: DROP TABLE avro_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: DROP TABLE parquet_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@parquet_table
-PREHOOK: Output: default@parquet_table
-POSTHOOK: query: DROP TABLE parquet_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@parquet_table
-POSTHOOK: Output: default@parquet_table

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/parquet_map_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.out b/ql/src/test/results/clientpositive/parquet_map_null.q.out
new file mode 100644
index 0000000..d1357c1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/parquet_map_null.q.out
@@ -0,0 +1,68 @@
+PREHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
+
+DROP TABLE IF EXISTS avro_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values
+
+DROP TABLE IF EXISTS avro_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS parquet_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS parquet_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@avro_table
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@avro_table
+PREHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@avro_table
+PREHOOK: Output: database:default
+PREHOOK: Output: default@parquet_table
+POSTHOOK: query: CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@avro_table
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@parquet_table
+POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table)avro_table.FieldSchema(name:avreau_col_1, type:map<string,string>, comment:), ]
+PREHOOK: query: SELECT * FROM parquet_table
+PREHOOK: type: QUERY
+PREHOOK: Input: default@parquet_table
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM parquet_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@parquet_table
+#### A masked pattern was here ####
+{"key3":"val3","key4":null}
+{"key3":"val3","key4":null}
+{"key1":null,"key2":"val2"}
+{"key3":"val3","key4":null}
+{"key3":"val3","key4":null}
+PREHOOK: query: DROP TABLE avro_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@avro_table
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: DROP TABLE avro_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@avro_table
+POSTHOOK: Output: default@avro_table
+PREHOOK: query: DROP TABLE parquet_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@parquet_table
+PREHOOK: Output: default@parquet_table
+POSTHOOK: query: DROP TABLE parquet_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@parquet_table
+POSTHOOK: Output: default@parquet_table
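
The three hunks above replace the JDK-specific golden files for
parquet_map_null with a single one. The only divergence between the
java1.7 and java1.8 outputs was the iteration order of the map keys
({"key4":null,"key3":"val3"} versus {"key3":"val3","key4":null}), which
is consistent with the HashMap iteration-order change between JDK 7 and
JDK 8. With the branch targeting Java 8 only, the unified file keeps
the Java 8 ordering and drops the -- JAVA_VERSION_SPECIFIC_OUTPUT
marker. Restated as a minimal sketch (the fixture path is the one the
test harness uses):

    -- Round-trip null map values from Avro into Parquet via CTAS.
    CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO;
    LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro'
      OVERWRITE INTO TABLE avro_table;
    CREATE TABLE parquet_table STORED AS PARQUET AS SELECT * FROM avro_table;
    -- The null values inside the maps must survive the conversion.
    SELECT * FROM parquet_table;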

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/plan_json.q.java1.7.out b/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
deleted file mode 100644
index dda4adc..0000000
--- a/ql/src/test/results/clientpositive/plan_json.q.java1.7.out
+++ /dev/null
@@ -1,13 +0,0 @@
-PREHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN FORMATTED SELECT count(1) FROM src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN FORMATTED SELECT count(1) FROM src
-POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Group By Operator":{"aggregations:":["count(1)"],"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","value expressions:":"_col0 (type: bigint)"}}}}}}}}],"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
 stats: COMPLETE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/plan_json.q.java1.8.out b/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
deleted file mode 100644
index dda4adc..0000000
--- a/ql/src/test/results/clientpositive/plan_json.q.java1.8.out
+++ /dev/null
@@ -1,13 +0,0 @@
-PREHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN FORMATTED SELECT count(1) FROM src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN FORMATTED SELECT count(1) FROM src
-POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Group By Operator":{"aggregations:":["count(1)"],"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","value expressions:":"_col0 (type: bigint)"}}}}}}}}],"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
 stats: COMPLETE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/plan_json.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/plan_json.q.out b/ql/src/test/results/clientpositive/plan_json.q.out
new file mode 100644
index 0000000..98c6626
--- /dev/null
+++ b/ql/src/test/results/clientpositive/plan_json.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
+
+
+EXPLAIN FORMATTED SELECT count(1) FROM src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
+
+
+EXPLAIN FORMATTED SELECT count(1) FROM src
+POSTHOOK: type: QUERY
+{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-1"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Select Operator":{"Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Group By Operator":{"aggregations:":["count(1)"],"mode:":"hash","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","value expressions:":"_col0 (type: bigint)"}}}}}}}}],"Reduce Operator Tree:":{"Group By Operator":{"aggregations:":["count(VALUE._col0)"],"mode:":"mergepartial","outputColumnNames:":["_col0"],"Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
 stats: COMPLETE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}
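
The plan_json golden files are unified the same way, but here the
deleted java1.7 and java1.8 files were byte-identical (both at blob
index dda4adc), so beyond dropping the -- JAVA_VERSION_SPECIFIC_OUTPUT
marker nothing changes. The recorded line is the single-line JSON
emitted by EXPLAIN FORMATTED; pretty-printed purely for readability, it
begins:

    {
      "STAGE DEPENDENCIES": {
        "Stage-1": {"ROOT STAGE": "TRUE"},
        "Stage-0": {"DEPENDENT STAGES": "Stage-1"}
      },
      "STAGE PLANS": { "Stage-1": { "Map Reduce": { ... } } }
    }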

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out b/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out
deleted file mode 100644
index b3a58d0..0000000
--- a/ql/src/test/results/clientpositive/spark/join0.q.java1.7.out
+++ /dev/null
@@ -1,238 +0,0 @@
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	9	val_9
-0	val_0	9	val_9
-0	val_0	9	val_9
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	2	val_2
-2	val_2	4	val_4
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	8	val_8
-2	val_2	9	val_9
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	2	val_2
-4	val_4	4	val_4
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	8	val_8
-4	val_4	9	val_9
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	9	val_9
-5	val_5	9	val_9
-5	val_5	9	val_9
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	2	val_2
-8	val_8	4	val_4
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	8	val_8
-8	val_8	9	val_9
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	2	val_2
-9	val_9	4	val_4
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	8	val_8
-9	val_9	9	val_9

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/join0.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join0.q.java1.8.out b/ql/src/test/results/clientpositive/spark/join0.q.java1.8.out
deleted file mode 100644
index 7acd108..0000000
--- a/ql/src/test/results/clientpositive/spark/join0.q.java1.8.out
+++ /dev/null
@@ -1,238 +0,0 @@
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	9	val_9
-0	val_0	9	val_9
-0	val_0	9	val_9
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	2	val_2
-2	val_2	4	val_4
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	8	val_8
-2	val_2	9	val_9
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	2	val_2
-4	val_4	4	val_4
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	8	val_8
-4	val_4	9	val_9
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	9	val_9
-5	val_5	9	val_9
-5	val_5	9	val_9
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	2	val_2
-8	val_8	4	val_4
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	8	val_8
-8	val_8	9	val_9
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	2	val_2
-9	val_9	4	val_4
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	8	val_8
-9	val_9	9	val_9

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join0.q.out b/ql/src/test/results/clientpositive/spark/join0.q.out
index 56b154f..bc98bb4 100644
--- a/ql/src/test/results/clientpositive/spark/join0.q.out
+++ b/ql/src/test/results/clientpositive/spark/join0.q.out
@@ -1,5 +1,7 @@
 Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 
   (SELECT * FROM src WHERE src.key < 10) src1 
@@ -7,7 +9,9 @@ SELECT src1.key as k1, src1.value as v1,
   (SELECT * FROM src WHERE src.key < 10) src2
   SORT BY k1, v1, k2, v2
 PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
 SELECT src1.key as k1, src1.value as v1, 
        src2.key as k2, src2.value as v2 FROM 
   (SELECT * FROM src WHERE src.key < 10) src1 
@@ -24,7 +28,7 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (SORT, 1)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 4)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -64,9 +68,9 @@ STAGE PLANS:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
-                condition expressions:
-                  0 {VALUE._col0} {VALUE._col1}
-                  1 {VALUE._col0} {VALUE._col1}
+                keys:
+                  0 
+                  1 
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
@@ -83,8 +87,8 @@ STAGE PLANS:
                   compressed: false
                   Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                   table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
deleted file mode 100644
index 3040544..0000000
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.7.out
+++ /dev/null
@@ -1,252 +0,0 @@
-PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 1
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Static Partition Specification: ds=2008-04-08/hr=11/
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                          properties:
-                            bucket_count -1
-                            columns key,value
-                            columns.comments 
-                            columns.types string:string
-#### A masked pattern was here ####
-                            name default.list_bucketing_static_part
-                            partition_columns ds/hr
-                            partition_columns.types string:string
-                            serialization.ddl struct list_bucketing_static_part { string key, string value}
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                          serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                          name: default.list_bucketing_static_part
-                      TotalFiles: 1
-                      GatherStats: true
-                      MultiFileSpray: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5520                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key]               	 
-Skewed Values:      	[[484], [51], [103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484, [103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   


[12/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
index 17d5992..d02e096 100644
--- a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -186,7 +186,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -315,7 +315,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -468,7 +468,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -602,7 +602,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -734,7 +734,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index 9475487..b0f3071 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -1388,12 +1388,12 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
@@ -1417,7 +1417,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1497,9 +1497,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -1545,7 +1545,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0) (type: double)
                   1 _col0 (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -1646,9 +1646,9 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
             LLAP IO: all inputs
@@ -1660,7 +1660,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1740,9 +1740,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -1773,7 +1773,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0) (type: double)
                   1 _col0 (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -1867,9 +1867,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      key expressions: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                       sort order: +
-                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -1915,7 +1915,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                  0 UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                   1 UDFToString(_col0) (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -1981,6 +1981,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -2005,17 +2006,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      sort order: 
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Map 4 
@@ -2024,16 +2021,18 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    keys: '2008-04-08' (type: string)
-                    mode: hash
-                    outputColumnNames: _col0
+                  Select Operator
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: '2008-04-08' (type: string)
-                      sort order: +
-                      Map-reduce partition columns: '2008-04-08' (type: string)
+                    Group By Operator
+                      keys: '2008-04-08' (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -2043,9 +2042,9 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col0 (type: string)
-                  1 '2008-04-08' (type: string)
-                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+                  0 
+                  1 
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -2074,32 +2073,15 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
-                keys: '2008-04-08' (type: string)
+                keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: '2008-04-08' (type: string)
-                    sort order: +
-                    Map-reduce partition columns: '2008-04-08' (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    sort order: 
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Dynamic Partitioning Event Operator
-                        Target column: ds (string)
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -2107,21 +2089,18 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
@@ -2137,7 +2116,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- non-equi join
 EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
@@ -2238,7 +2217,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -4372,7 +4351,7 @@ STAGE PLANS:
                            Inner Join 0 to 1
                       keys:
                         0 UDFToDouble(_col0) (type: double)
-                        1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                       input vertices:
                         1 Map 3
                       Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
@@ -4401,12 +4380,12 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
@@ -4494,7 +4473,7 @@ STAGE PLANS:
                       condition map:
                            Inner Join 0 to 1
                       keys:
-                        0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                        0 (UDFToDouble(_col0) * 2.0) (type: double)
                         1 _col0 (type: double)
                       input vertices:
                         1 Map 3
@@ -4599,6 +4578,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
+Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -4614,60 +4594,72 @@ STAGE PLANS:
     Tez
 #### A masked pattern was here ####
       Edges:
-        Map 1 <- Reducer 4 (BROADCAST_EDGE)
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 4 <- Map 3 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (BROADCAST_EDGE), Map 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Map Join Operator
-                      condition map:
-                           Inner Join 0 to 1
-                      keys:
-                        0 _col0 (type: string)
-                        1 '2008-04-08' (type: string)
-                      input vertices:
-                        1 Reducer 4
-                      Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
-        Map 3 
+        Map 2 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    keys: '2008-04-08' (type: string)
-                    mode: hash
-                    outputColumnNames: _col0
+                  Select Operator
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: '2008-04-08' (type: string)
-                      sort order: +
-                      Map-reduce partition columns: '2008-04-08' (type: string)
+                    Group By Operator
+                      keys: '2008-04-08' (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
-        Reducer 2 
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    keys:
+                      0 
+                      1 
+                    input vertices:
+                      0 Map 1
+                    Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+        Reducer 4 
             Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
@@ -4682,36 +4674,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Execution mode: vectorized, llap
-            Reduce Operator Tree:
-              Group By Operator
-                keys: '2008-04-08' (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: '2008-04-08' (type: string)
-                    sort order: +
-                    Map-reduce partition columns: '2008-04-08' (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Dynamic Partitioning Event Operator
-                        Target column: ds (string)
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -4719,21 +4681,18 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/masking_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_2.q.out b/ql/src/test/results/clientpositive/masking_2.q.out
index f998cbd..ff045a9 100644
--- a/ql/src/test/results/clientpositive/masking_2.q.out
+++ b/ql/src/test/results/clientpositive/masking_2.q.out
@@ -192,7 +192,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and UDFToInteger(key) is not null) (type: boolean)
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean)
               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mergejoin.q.out b/ql/src/test/results/clientpositive/mergejoin.q.out
index a85fd8b..8dd86de 100644
--- a/ql/src/test/results/clientpositive/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/mergejoin.q.out
@@ -2692,6 +2692,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 full outer join
@@ -2699,6 +2700,7 @@ full outer join
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab
 PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
 (select * from tab where tab.key = 0)a
@@ -2707,7 +2709,9 @@ full outer join
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab
 POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 full outer join
@@ -2730,7 +2734,7 @@ NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 join

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out b/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
index ea25416..71ba502 100644
--- a/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
+++ b/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- This test query is introduced for HIVE-4968.
 -- First, we do not convert the join to MapJoin.
 EXPLAIN
@@ -140,7 +140,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
 FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
       FROM (SELECT *
@@ -188,7 +188,7 @@ POSTHOOK: Input: default@src1
 406	val_406	25
 66	val_66	25
 98	val_98	25
-Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: -- Then, we convert the join to MapJoin.
 EXPLAIN
 SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
@@ -296,7 +296,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
 FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
       FROM (SELECT *

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/orc_llap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_llap.q.out b/ql/src/test/results/clientpositive/orc_llap.q.out
index bae69bb..658c41d 100644
--- a/ql/src/test/results/clientpositive/orc_llap.q.out
+++ b/ql/src/test/results/clientpositive/orc_llap.q.out
@@ -81,7 +81,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: default@cross_numbers
 POSTHOOK: Lineage: cross_numbers.i EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: insert into table orc_llap
 select ctinyint + i, csmallint + i, cint + i, cbigint + i, cfloat + i, cdouble + i, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2
 from alltypesorc cross join cross_numbers
@@ -121,7 +121,7 @@ POSTHOOK: Output: default@orc_llap_small
 POSTHOOK: Lineage: orc_llap_small.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: orc_llap_small.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: orc_llap_small.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
-Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: -- Cross join with no projection - do it on small table
 explain
 select count(1) from orc_llap_small y join orc_llap_small x
@@ -202,7 +202,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select count(1) from orc_llap_small y join orc_llap_small x
 PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_llap_small
@@ -657,7 +657,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@llap_temp_table
 #### A masked pattern was here ####
 -735462183586256
-Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Stage-4:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-4:MAPRED' is a cross product
 PREHOOK: query: -- multi-stripe test
 insert into table orc_llap
 select ctinyint + i, csmallint + i, cint + i, cbigint + i, cfloat + i, cdouble + i, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/partition_multilevels.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_multilevels.q.out b/ql/src/test/results/clientpositive/partition_multilevels.q.out
index 948d3a0..96aa76f 100644
--- a/ql/src/test/results/clientpositive/partition_multilevels.q.out
+++ b/ql/src/test/results/clientpositive/partition_multilevels.q.out
@@ -991,29 +991,29 @@ STAGE PLANS:
             Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: level2 (type: string), level3 (type: string)
-              outputColumnNames: _col1, _col2
+              outputColumnNames: level2, level3
               Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: '2222' (type: string), _col1 (type: string), _col2 (type: string)
+                keys: level2 (type: string), level3 (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: '2222' (type: string), _col1 (type: string), _col2 (type: string)
-                  sort order: +++
-                  Map-reduce partition columns: '2222' (type: string), _col1 (type: string), _col2 (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
+                  value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: '2222' (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
+          outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: '2222' (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
+            expressions: '2222' (type: string), _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -1583,29 +1583,29 @@ STAGE PLANS:
             Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: level2 (type: string), level3 (type: string)
-              outputColumnNames: _col1, _col2
+              outputColumnNames: level2, level3
               Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: '2222' (type: string), _col1 (type: string), _col2 (type: string)
+                keys: level2 (type: string), level3 (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: '2222' (type: string), _col1 (type: string), _col2 (type: string)
-                  sort order: +++
-                  Map-reduce partition columns: '2222' (type: string), _col1 (type: string), _col2 (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col3 (type: bigint)
+                  value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: '2222' (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
+          outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: '2222' (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint)
+            expressions: '2222' (type: string), _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

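Editorial note: in the two hunks above the literal '2222' is no longer folded into the group-by keys and the shuffle key; the aggregation now runs over level2/level3 alone and the final Select re-attaches the constant. A query of roughly this shape produces such a plan (hypothetical reconstruction -- only level2, level3 and the '2222' literal are visible in the output, and the table name below is assumed, not taken from this message):

    select level1, level2, level3, count(1)
    from partition_test_multilevel    -- assumed table name
    where level1 = '2222'
    group by level1, level2, level3;
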
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out
index 7222617..9daddfb 100644
--- a/ql/src/test/results/clientpositive/pcr.q.out
+++ b/ql/src/test/results/clientpositive/pcr.q.out
@@ -1505,7 +1505,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: 14 (type: int), KEY.reducesinkkey1 (type: string)
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -2327,23 +2327,27 @@ STAGE PLANS:
             1 _col0 (type: int)
           outputColumnNames: _col0, _col1, _col3, _col4
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+          Select Operator
+            expressions: _col0 (type: int), _col1 (type: string), '2000-04-08' (type: string), _col3 (type: int), _col4 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col3,_col4
-                  columns.types int,string,int,string
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3,_col4
+                    columns.types int,string,string,int,string
+                    escape.delim \
+                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -2356,7 +2360,7 @@ STAGE PLANS:
               sort order: +
               Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
               tag: -1
-              value expressions: _col1 (type: string), _col3 (type: int), _col4 (type: string)
+              value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: int), _col4 (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -2367,8 +2371,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col0,_col1,_col3,_col4
-              columns.types int,string,int,string
+              columns _col0,_col1,_col2,_col3,_col4
+              columns.types int,string,string,int,string
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -2376,8 +2380,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col0,_col1,_col3,_col4
-                columns.types int,string,int,string
+                columns _col0,_col1,_col2,_col3,_col4
+                columns.types int,string,string,int,string
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -2386,7 +2390,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), '2000-04-08' (type: string)
+          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -2627,23 +2631,27 @@ STAGE PLANS:
             1 _col0 (type: int)
           outputColumnNames: _col0, _col1, _col3, _col4
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+          Select Operator
+            expressions: _col0 (type: int), _col1 (type: string), '2000-04-08' (type: string), _col3 (type: int), _col4 (type: string), '2000-04-09' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col3,_col4
-                  columns.types int,string,int,string
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3,_col4,_col5
+                    columns.types int,string,string,int,string,string
+                    escape.delim \
+                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -2656,7 +2664,7 @@ STAGE PLANS:
               sort order: +
               Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
               tag: -1
-              value expressions: _col1 (type: string), _col3 (type: int), _col4 (type: string)
+              value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: int), _col4 (type: string), _col5 (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -2667,8 +2675,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col0,_col1,_col3,_col4
-              columns.types int,string,int,string
+              columns _col0,_col1,_col2,_col3,_col4,_col5
+              columns.types int,string,string,int,string,string
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -2676,8 +2684,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col0,_col1,_col3,_col4
-                columns.types int,string,int,string
+                columns _col0,_col1,_col2,_col3,_col4,_col5
+                columns.types int,string,string,int,string,string
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -2686,7 +2694,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), '2000-04-09' (type: string)
+          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -4845,7 +4853,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), '2008-04-08' (type: string), KEY.reducesinkkey2 (type: string)
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -5024,7 +5032,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), '11' (type: string)
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           File Output Operator

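Editorial note: the pcr.q.out hunks above show the same pattern from the other direction; partition constants such as '2000-04-08', '2000-04-09' and '11' are no longer substituted into the reduce-side Select but are carried through the shuffle as ordinary columns, which is why the serialized schemas gain the extra _col2/_col5 entries. A self-join of the following shape matches the first two plans (a sketch under the assumption that the test table is pcr_t1(key int, value string) partitioned by ds -- the table definition is not spelled out in this message):

    select t1.key, t1.value, t1.ds, t2.key, t2.value, t2.ds
    from pcr_t1 t1 join pcr_t1 t2 on t1.key = t2.key
    where t1.ds = '2000-04-08' and t2.ds = '2000-04-09'
    order by t1.key;
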
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query18.q.out b/ql/src/test/results/clientpositive/perf/query18.q.out
index a1c9da5..5bd0f1f 100644
--- a/ql/src/test/results/clientpositive/perf/query18.q.out
+++ b/ql/src/test/results/clientpositive/perf/query18.q.out
@@ -49,11 +49,11 @@ Stage-0
                               <-Map 15 [SIMPLE_EDGE]
                                 SHUFFLE [RS_28]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_20] (rows=5047 width=16)
+                                  Select Operator [SEL_20] (rows=388 width=208)
                                     Output:["_col0","_col3"]
-                                    Filter Operator [FIL_77] (rows=5047 width=16)
+                                    Filter Operator [FIL_77] (rows=388 width=208)
                                       predicate:((cd_gender = 'M') and (cd_education_status = 'College') and cd_demo_sk is not null)
-                                      TableScan [TS_18] (rows=20191 width=16)
+                                      TableScan [TS_18] (rows=1553 width=208)
                                         default@customer_demographics,cd1,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_education_status","cd_dep_count"]
                               <-Reducer 11 [SIMPLE_EDGE]
                                 SHUFFLE [RS_27]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query26.q.out b/ql/src/test/results/clientpositive/perf/query26.q.out
index 98a85f7..6564631 100644
--- a/ql/src/test/results/clientpositive/perf/query26.q.out
+++ b/ql/src/test/results/clientpositive/perf/query26.q.out
@@ -38,11 +38,11 @@ Stage-0
                       <-Map 11 [SIMPLE_EDGE]
                         SHUFFLE [RS_25]
                           PartitionCols:_col0
-                          Select Operator [SEL_14] (rows=3106 width=13)
+                          Select Operator [SEL_14] (rows=132 width=304)
                             Output:["_col0"]
-                            Filter Operator [FIL_54] (rows=3106 width=13)
+                            Filter Operator [FIL_54] (rows=132 width=304)
                               predicate:((cd_gender = 'F') and (cd_marital_status = 'W') and (cd_education_status = 'Primary') and cd_demo_sk is not null)
-                              TableScan [TS_12] (rows=24850 width=13)
+                              TableScan [TS_12] (rows=1062 width=304)
                                 default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
                       <-Reducer 4 [SIMPLE_EDGE]
                         SHUFFLE [RS_24]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query27.q.out b/ql/src/test/results/clientpositive/perf/query27.q.out
index 36302ca..d3e568d 100644
--- a/ql/src/test/results/clientpositive/perf/query27.q.out
+++ b/ql/src/test/results/clientpositive/perf/query27.q.out
@@ -38,11 +38,11 @@ Stage-0
                       <-Map 11 [SIMPLE_EDGE]
                         SHUFFLE [RS_25]
                           PartitionCols:_col0
-                          Select Operator [SEL_14] (rows=3106 width=13)
+                          Select Operator [SEL_14] (rows=132 width=304)
                             Output:["_col0"]
-                            Filter Operator [FIL_54] (rows=3106 width=13)
+                            Filter Operator [FIL_54] (rows=132 width=304)
                               predicate:((cd_gender = 'F') and (cd_marital_status = 'D') and (cd_education_status = 'Unknown') and cd_demo_sk is not null)
-                              TableScan [TS_12] (rows=24850 width=13)
+                              TableScan [TS_12] (rows=1062 width=304)
                                 default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
                       <-Reducer 4 [SIMPLE_EDGE]
                         SHUFFLE [RS_24]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query28.q.out b/ql/src/test/results/clientpositive/perf/query28.q.out
index 0157928..9e956f5 100644
--- a/ql/src/test/results/clientpositive/perf/query28.q.out
+++ b/ql/src/test/results/clientpositive/perf/query28.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 3' is a cross product
+Warning: Shuffle Join MERGEJOIN[58][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: explain select  *
 from (select avg(ss_list_price) B1_LP
             ,count(ss_list_price) B1_CNT
@@ -117,15 +117,15 @@ Stage-0
     limit:100
     Stage-1
       Reducer 3
-      File Output Operator [FS_56]
-        Limit [LIM_55] (rows=5 width=149)
+      File Output Operator [FS_51]
+        Limit [LIM_50] (rows=5 width=149)
           Number of rows:100
-          Select Operator [SEL_54] (rows=5 width=149)
+          Select Operator [SEL_49] (rows=5 width=149)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"]
-            Merge Join Operator [MERGEJOIN_63] (rows=5 width=149)
+            Merge Join Operator [MERGEJOIN_58] (rows=5 width=149)
               Conds:(Inner),(Inner),(Inner),(Inner),(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"]
             <-Reducer 11 [SIMPLE_EDGE]
-              SHUFFLE [RS_51]
+              SHUFFLE [RS_46]
                 Group By Operator [GBY_33] (rows=1 width=136)
                   Output:["_col0","_col1","_col2"],aggregations:["avg(VALUE._col0)","count(VALUE._col1)","count(DISTINCT KEY._col0:0._col0)"]
                 <-Map 10 [SIMPLE_EDGE]
@@ -134,12 +134,12 @@ Stage-0
                       Output:["_col0","_col1","_col2","_col3"],aggregations:["avg(ss_list_price)","count(ss_list_price)","count(DISTINCT ss_list_price)"],keys:ss_list_price
                       Select Operator [SEL_30] (rows=431996724 width=88)
                         Output:["ss_list_price"]
-                        Filter Operator [FIL_61] (rows=431996724 width=88)
+                        Filter Operator [FIL_56] (rows=431996724 width=88)
                           predicate:(ss_quantity BETWEEN 11 AND 15 and (ss_list_price BETWEEN 66 AND 76 or ss_coupon_amt BETWEEN 920 AND 1920 or ss_wholesale_cost BETWEEN 4 AND 24))
                           TableScan [TS_28] (rows=575995635 width=88)
                             default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_quantity","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]
             <-Reducer 13 [SIMPLE_EDGE]
-              SHUFFLE [RS_52]
+              SHUFFLE [RS_47]
                 Group By Operator [GBY_40] (rows=1 width=136)
                   Output:["_col0","_col1","_col2"],aggregations:["avg(VALUE._col0)","count(VALUE._col1)","count(DISTINCT KEY._col0:0._col0)"]
                 <-Map 12 [SIMPLE_EDGE]
@@ -148,12 +148,12 @@ Stage-0
                       Output:["_col0","_col1","_col2","_col3"],aggregations:["avg(ss_list_price)","count(ss_list_price)","count(DISTINCT ss_list_price)"],keys:ss_list_price
                       Select Operator [SEL_37] (rows=431996724 width=88)
                         Output:["ss_list_price"]
-                        Filter Operator [FIL_62] (rows=431996724 width=88)
+                        Filter Operator [FIL_57] (rows=431996724 width=88)
                           predicate:(ss_quantity BETWEEN 6 AND 10 and (ss_list_price BETWEEN 91 AND 101 or ss_coupon_amt BETWEEN 1430 AND 2430 or ss_wholesale_cost BETWEEN 32 AND 52))
                           TableScan [TS_35] (rows=575995635 width=88)
                             default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_quantity","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]
             <-Reducer 2 [SIMPLE_EDGE]
-              SHUFFLE [RS_47]
+              SHUFFLE [RS_42]
                 Group By Operator [GBY_5] (rows=1 width=136)
                   Output:["_col0","_col1","_col2"],aggregations:["avg(VALUE._col0)","count(VALUE._col1)","count(DISTINCT KEY._col0:0._col0)"]
                 <-Map 1 [SIMPLE_EDGE]
@@ -162,12 +162,12 @@ Stage-0
                       Output:["_col0","_col1","_col2","_col3"],aggregations:["avg(ss_list_price)","count(ss_list_price)","count(DISTINCT ss_list_price)"],keys:ss_list_price
                       Select Operator [SEL_2] (rows=431996724 width=88)
                         Output:["ss_list_price"]
-                        Filter Operator [FIL_57] (rows=431996724 width=88)
+                        Filter Operator [FIL_52] (rows=431996724 width=88)
                           predicate:(ss_quantity BETWEEN 0 AND 5 and (ss_list_price BETWEEN 11 AND 21 or ss_coupon_amt BETWEEN 460 AND 1460 or ss_wholesale_cost BETWEEN 14 AND 34))
                           TableScan [TS_0] (rows=575995635 width=88)
                             default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_quantity","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]
             <-Reducer 5 [SIMPLE_EDGE]
-              SHUFFLE [RS_48]
+              SHUFFLE [RS_43]
                 Group By Operator [GBY_12] (rows=1 width=136)
                   Output:["_col0","_col1","_col2"],aggregations:["avg(VALUE._col0)","count(VALUE._col1)","count(DISTINCT KEY._col0:0._col0)"]
                 <-Map 4 [SIMPLE_EDGE]
@@ -176,12 +176,12 @@ Stage-0
                       Output:["_col0","_col1","_col2","_col3"],aggregations:["avg(ss_list_price)","count(ss_list_price)","count(DISTINCT ss_list_price)"],keys:ss_list_price
                       Select Operator [SEL_9] (rows=431996724 width=88)
                         Output:["ss_list_price"]
-                        Filter Operator [FIL_58] (rows=431996724 width=88)
+                        Filter Operator [FIL_53] (rows=431996724 width=88)
                           predicate:(ss_quantity BETWEEN 26 AND 30 and (ss_list_price BETWEEN 28 AND 38 or ss_coupon_amt BETWEEN 2513 AND 3513 or ss_wholesale_cost BETWEEN 42 AND 62))
                           TableScan [TS_7] (rows=575995635 width=88)
                             default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_quantity","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]
             <-Reducer 7 [SIMPLE_EDGE]
-              SHUFFLE [RS_49]
+              SHUFFLE [RS_44]
                 Group By Operator [GBY_19] (rows=1 width=136)
                   Output:["_col0","_col1","_col2"],aggregations:["avg(VALUE._col0)","count(VALUE._col1)","count(DISTINCT KEY._col0:0._col0)"]
                 <-Map 6 [SIMPLE_EDGE]
@@ -190,12 +190,12 @@ Stage-0
                       Output:["_col0","_col1","_col2","_col3"],aggregations:["avg(ss_list_price)","count(ss_list_price)","count(DISTINCT ss_list_price)"],keys:ss_list_price
                       Select Operator [SEL_16] (rows=431996724 width=88)
                         Output:["ss_list_price"]
-                        Filter Operator [FIL_59] (rows=431996724 width=88)
+                        Filter Operator [FIL_54] (rows=431996724 width=88)
                           predicate:(ss_quantity BETWEEN 21 AND 25 and (ss_list_price BETWEEN 135 AND 145 or ss_coupon_amt BETWEEN 14180 AND 15180 or ss_wholesale_cost BETWEEN 38 AND 58))
                           TableScan [TS_14] (rows=575995635 width=88)
                             default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_quantity","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]
             <-Reducer 9 [SIMPLE_EDGE]
-              SHUFFLE [RS_50]
+              SHUFFLE [RS_45]
                 Group By Operator [GBY_26] (rows=1 width=136)
                   Output:["_col0","_col1","_col2"],aggregations:["avg(VALUE._col0)","count(VALUE._col1)","count(DISTINCT KEY._col0:0._col0)"]
                 <-Map 8 [SIMPLE_EDGE]
@@ -204,7 +204,7 @@ Stage-0
                       Output:["_col0","_col1","_col2","_col3"],aggregations:["avg(ss_list_price)","count(ss_list_price)","count(DISTINCT ss_list_price)"],keys:ss_list_price
                       Select Operator [SEL_23] (rows=431996724 width=88)
                         Output:["ss_list_price"]
-                        Filter Operator [FIL_60] (rows=431996724 width=88)
+                        Filter Operator [FIL_55] (rows=431996724 width=88)
                           predicate:(ss_quantity BETWEEN 16 AND 20 and (ss_list_price BETWEEN 142 AND 152 or ss_coupon_amt BETWEEN 3054 AND 4054 or ss_wholesale_cost BETWEEN 80 AND 100))
                           TableScan [TS_21] (rows=575995635 width=88)
                             default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_quantity","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]


[27/66] [abbrv] hive git commit: Preparing for 2.2.0 development

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
new file mode 100644
index 0000000..8735b50
--- /dev/null
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
@@ -0,0 +1,1017 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+------------------------------------------------------------------
+-- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
+------------------------------------------------------------------
+-- Complete schema required for the following classes:-
+--     org.apache.hadoop.hive.metastore.model.MColumnDescriptor
+--     org.apache.hadoop.hive.metastore.model.MDBPrivilege
+--     org.apache.hadoop.hive.metastore.model.MDatabase
+--     org.apache.hadoop.hive.metastore.model.MDelegationToken
+--     org.apache.hadoop.hive.metastore.model.MFieldSchema
+--     org.apache.hadoop.hive.metastore.model.MFunction
+--     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
+--     org.apache.hadoop.hive.metastore.model.MIndex
+--     org.apache.hadoop.hive.metastore.model.MMasterKey
+--     org.apache.hadoop.hive.metastore.model.MOrder
+--     org.apache.hadoop.hive.metastore.model.MPartition
+--     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
+--     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
+--     org.apache.hadoop.hive.metastore.model.MPartitionEvent
+--     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
+--     org.apache.hadoop.hive.metastore.model.MResourceUri
+--     org.apache.hadoop.hive.metastore.model.MRole
+--     org.apache.hadoop.hive.metastore.model.MRoleMap
+--     org.apache.hadoop.hive.metastore.model.MSerDeInfo
+--     org.apache.hadoop.hive.metastore.model.MStorageDescriptor
+--     org.apache.hadoop.hive.metastore.model.MStringList
+--     org.apache.hadoop.hive.metastore.model.MTable
+--     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
+--     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
+--     org.apache.hadoop.hive.metastore.model.MTablePrivilege
+--     org.apache.hadoop.hive.metastore.model.MType
+--     org.apache.hadoop.hive.metastore.model.MVersionTable
+--
+-- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID int NOT NULL,
+    MASTER_KEY nvarchar(767) NULL
+);
+
+ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DEFERRED_REBUILD bit NOT NULL,
+    INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+    INDEX_NAME nvarchar(128) NULL,
+    INDEX_TBL_ID bigint NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    ORIG_TBL_ID bigint NULL,
+    SD_ID bigint NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+CREATE TABLE PART_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" nvarchar(1000) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    PART_ID bigint NULL,
+    PARTITION_NAME nvarchar(767) NOT NULL,
+    "TABLE_NAME" nvarchar(128) NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PART_ID bigint NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID bigint NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    ROLE_NAME nvarchar(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    PART_NAME nvarchar(767) NULL,
+    SD_ID bigint NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+CREATE TABLE CDS
+(
+    CD_ID bigint NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable]
+CREATE TABLE VERSION
+(
+    VER_ID bigint NOT NULL,
+    SCHEMA_VERSION nvarchar(127) NOT NULL,
+    VERSION_COMMENT nvarchar(255) NOT NULL
+);
+
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    USER_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(1000) NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PART_ID bigint NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    PART_COL_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    DB_PRIV nvarchar(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+CREATE TABLE TAB_COL_STATS
+(
+    CS_ID bigint NOT NULL,
+    AVG_COL_LEN float NULL,
+    "COLUMN_NAME" nvarchar(1000) NOT NULL,
+    COLUMN_TYPE nvarchar(128) NOT NULL,
+    DB_NAME nvarchar(128) NOT NULL,
+    BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+    BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+    DOUBLE_HIGH_VALUE float NULL,
+    DOUBLE_LOW_VALUE float NULL,
+    LAST_ANALYZED bigint NOT NULL,
+    LONG_HIGH_VALUE bigint NULL,
+    LONG_LOW_VALUE bigint NULL,
+    MAX_COL_LEN bigint NULL,
+    NUM_DISTINCTS bigint NULL,
+    NUM_FALSES bigint NULL,
+    NUM_NULLS bigint NOT NULL,
+    NUM_TRUES bigint NULL,
+    TBL_ID bigint NULL,
+    "TABLE_NAME" nvarchar(128) NOT NULL
+);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID bigint NOT NULL,
+    TYPE_NAME nvarchar(128) NULL,
+    TYPE1 nvarchar(767) NULL,
+    TYPE2 nvarchar(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_PRIV nvarchar(128) NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID bigint NOT NULL,
+    "DESC" nvarchar(4000) NULL,
+    DB_LOCATION_URI nvarchar(4000) NOT NULL,
+    "NAME" nvarchar(128) NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(1000) NULL,
+    CREATE_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    TBL_COL_PRIV nvarchar(128) NULL,
+    TBL_ID bigint NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT nvarchar(767) NOT NULL,
+    TOKEN nvarchar(767) NULL
+);
+
+ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID bigint NOT NULL,
+    "NAME" nvarchar(128) NULL,
+    SLIB nvarchar(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction]
+CREATE TABLE FUNCS
+(
+    FUNC_ID bigint NOT NULL,
+    CLASS_NAME nvarchar(4000) NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    FUNC_NAME nvarchar(128) NULL,
+    FUNC_TYPE int NOT NULL,
+    OWNER_NAME nvarchar(128) NULL,
+    OWNER_TYPE nvarchar(10) NULL
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID bigint NOT NULL,
+    ADD_TIME int NOT NULL,
+    GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+    GRANTOR nvarchar(128) NULL,
+    GRANTOR_TYPE nvarchar(128) NULL,
+    PRINCIPAL_NAME nvarchar(128) NULL,
+    PRINCIPAL_TYPE nvarchar(128) NULL,
+    ROLE_ID bigint NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID bigint NOT NULL,
+    CREATE_TIME int NOT NULL,
+    DB_ID bigint NULL,
+    LAST_ACCESS_TIME int NOT NULL,
+    OWNER nvarchar(767) NULL,
+    RETENTION int NOT NULL,
+    SD_ID bigint NULL,
+    TBL_NAME nvarchar(128) NULL,
+    TBL_TYPE nvarchar(128) NULL,
+    VIEW_EXPANDED_TEXT text NULL,
+    VIEW_ORIGINAL_TEXT text NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID bigint NOT NULL,
+    CD_ID bigint NULL,
+    INPUT_FORMAT nvarchar(4000) NULL,
+    IS_COMPRESSED bit NOT NULL,
+    IS_STOREDASSUBDIRECTORIES bit NOT NULL,
+    LOCATION nvarchar(4000) NULL,
+    NUM_BUCKETS int NOT NULL,
+    OUTPUT_FORMAT nvarchar(4000) NULL,
+    SERDE_ID bigint NULL
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID bigint NOT NULL,
+    DB_NAME nvarchar(128) NULL,
+    EVENT_TIME bigint NOT NULL,
+    EVENT_TYPE int NOT NULL,
+    PARTITION_NAME nvarchar(767) NULL,
+    TBL_NAME nvarchar(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID bigint NOT NULL,
+    "COLUMN_NAME" nvarchar(1000) NULL,
+    "ORDER" int NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table SKEWED_COL_NAMES for join relationship
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID bigint NOT NULL,
+    SKEWED_COL_NAME nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table SKEWED_COL_VALUE_LOC_MAP for join relationship
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID bigint NOT NULL,
+    STRING_LIST_ID_KID bigint NOT NULL,
+    LOCATION nvarchar(4000) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+-- Table SKEWED_STRING_LIST_VALUES for join relationship
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID bigint NOT NULL,
+    STRING_LIST_VALUE nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID bigint NOT NULL,
+    PART_KEY_VAL nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID bigint NOT NULL,
+    PKEY_COMMENT nvarchar(4000) NULL,
+    PKEY_NAME nvarchar(128) NOT NULL,
+    PKEY_TYPE nvarchar(767) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table SKEWED_VALUES for join relationship
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID bigint NOT NULL,
+    STRING_LIST_ID_EID bigint NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table FUNC_RU for join relationship
+CREATE TABLE FUNC_RU
+(
+    FUNC_ID bigint NOT NULL,
+    RESOURCE_TYPE int NOT NULL,
+    RESOURCE_URI nvarchar(4000) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME bigint NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    FIELD_NAME nvarchar(128) NOT NULL,
+    FIELD_TYPE nvarchar(767) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID bigint NOT NULL,
+    BUCKET_COL_NAME nvarchar(255) NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(180) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID bigint NOT NULL,
+    COMMENT nvarchar(256) NULL,
+    "COLUMN_NAME" nvarchar(1000) NOT NULL,
+    TYPE_NAME nvarchar(4000) NOT NULL,
+    INTEGER_IDX int NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID bigint NOT NULL,
+    PARAM_KEY nvarchar(256) NOT NULL,
+    PARAM_VALUE nvarchar(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+CREATE TABLE NOTIFICATION_LOG
+(
+    NL_ID bigint NOT NULL,
+    EVENT_ID bigint NOT NULL,
+    EVENT_TIME int NOT NULL,
+    EVENT_TYPE nvarchar(32) NOT NULL,
+    DB_NAME nvarchar(128) NULL,
+    TBL_NAME nvarchar(128) NULL,
+    MESSAGE text NULL
+);
+
+ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+
+CREATE TABLE NOTIFICATION_SEQUENCE
+(
+    NNI_ID bigint NOT NULL,
+    NEXT_EVENT_ID bigint NOT NULL
+);
+
+ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+
+-- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID);
+
+
+-- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList]
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+
+-- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable]
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
+
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME");
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table SKEWED_COL_NAMES
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID);
+
+
+-- Constraints for table SKEWED_COL_VALUE_LOC_MAP
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID);
+
+CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID);
+
+
+-- Constraints for table SKEWED_STRING_LIST_VALUES
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table SKEWED_VALUES
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+
+CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID);
+
+CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table FUNC_RU
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+-- Transaction and Lock Tables
+-- These tables are not part of the JDO package, so if you regenerate this file you must manually add this section back.
+-- -----------------------------------------------------------------------------------------------------------------------------------------------
+CREATE TABLE COMPACTION_QUEUE(
+	CQ_ID bigint NOT NULL,
+	CQ_DATABASE nvarchar(128) NOT NULL,
+	CQ_TABLE nvarchar(128) NOT NULL,
+	CQ_PARTITION nvarchar(767) NULL,
+	CQ_STATE char(1) NOT NULL,
+	CQ_TYPE char(1) NOT NULL,
+	CQ_WORKER_ID nvarchar(128) NULL,
+	CQ_START bigint NULL,
+	CQ_RUN_AS nvarchar(128) NULL,
+	CQ_HIGHEST_TXN_ID bigint NULL,
+	CQ_META_INFO varbinary(2048) NULL,
+	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
+PRIMARY KEY CLUSTERED 
+(
+	CQ_ID ASC
+)
+);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+	CC_ID bigint NOT NULL,
+	CC_DATABASE nvarchar(128) NOT NULL,
+	CC_TABLE nvarchar(128) NOT NULL,
+	CC_PARTITION nvarchar(767) NULL,
+	CC_STATE char(1) NOT NULL,
+	CC_TYPE char(1) NOT NULL,
+	CC_WORKER_ID nvarchar(128) NULL,
+	CC_START bigint NULL,
+	CC_END bigint NULL,
+	CC_RUN_AS nvarchar(128) NULL,
+	CC_HIGHEST_TXN_ID bigint NULL,
+	CC_META_INFO varbinary(2048) NULL,
+	CC_HADOOP_JOB_ID nvarchar(128) NULL,
+PRIMARY KEY CLUSTERED 
+(
+	CC_ID ASC
+)
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS(
+	CTC_TXNID bigint NULL,
+	CTC_DATABASE nvarchar(128) NOT NULL,
+	CTC_TABLE nvarchar(128) NULL,
+	CTC_PARTITION nvarchar(767) NULL
+);
+
+CREATE TABLE HIVE_LOCKS(
+	HL_LOCK_EXT_ID bigint NOT NULL,
+	HL_LOCK_INT_ID bigint NOT NULL,
+	HL_TXNID bigint NULL,
+	HL_DB nvarchar(128) NOT NULL,
+	HL_TABLE nvarchar(128) NULL,
+	HL_PARTITION nvarchar(767) NULL,
+	HL_LOCK_STATE char(1) NOT NULL,
+	HL_LOCK_TYPE char(1) NOT NULL,
+	HL_LAST_HEARTBEAT bigint NOT NULL,
+	HL_ACQUIRED_AT bigint NULL,
+	HL_USER nvarchar(128) NOT NULL,
+	HL_HOST nvarchar(128) NOT NULL,
+	HL_HEARTBEAT_COUNT int NULL,
+	HL_AGENT_INFO nvarchar(128) NULL,
+	HL_BLOCKEDBY_EXT_ID bigint NULL,
+	HL_BLOCKEDBY_INT_ID bigint NULL,
+PRIMARY KEY CLUSTERED 
+(
+	HL_LOCK_EXT_ID ASC,
+	HL_LOCK_INT_ID ASC
+)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
+	NCQ_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE NEXT_LOCK_ID(
+	NL_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE NEXT_TXN_ID(
+	NTXN_NEXT bigint NOT NULL
+);
+
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE TXNS(
+	TXN_ID bigint NOT NULL,
+	TXN_STATE char(1) NOT NULL,
+	TXN_STARTED bigint NOT NULL,
+	TXN_LAST_HEARTBEAT bigint NOT NULL,
+	TXN_USER nvarchar(128) NOT NULL,
+	TXN_HOST nvarchar(128) NOT NULL,
+	TXN_AGENT_INFO nvarchar(128) NULL,
+	TXN_META_INFO nvarchar(128) NULL,
+	TXN_HEARTBEAT_COUNT int NULL,
+PRIMARY KEY CLUSTERED 
+(
+	TXN_ID ASC
+)
+);
+
+CREATE TABLE TXN_COMPONENTS(
+	TC_TXNID bigint NULL,
+	TC_DATABASE nvarchar(128) NOT NULL,
+	TC_TABLE nvarchar(128) NULL,
+	TC_PARTITION nvarchar(767) NULL,
+	TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
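+-- WITH CHECK (above) tells SQL Server to validate the existing rows of
+-- TXN_COMPONENTS against TXNS when the foreign key is added, instead of
+-- trusting them unverified.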
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 nvarchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT nvarchar(255) NULL,
+  PRIMARY KEY CLUSTERED
+(
+    MT_KEY1 ASC,
+    MT_KEY2 ASC
+)
+);
+
+CREATE TABLE KEY_CONSTRAINTS
+(
+  CHILD_CD_ID BIGINT,
+  CHILD_INTEGER_IDX INT,
+  CHILD_TBL_ID BIGINT,
+  PARENT_CD_ID BIGINT NOT NULL,
+  PARENT_INTEGER_IDX INT NOT NULL,
+  PARENT_TBL_ID BIGINT NOT NULL,
+  POSITION INT NOT NULL,
+  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+  CONSTRAINT_TYPE SMALLINT NOT NULL,
+  UPDATE_RULE SMALLINT,
+  DELETE_RULE SMALLINT,
+  ENABLE_VALIDATE_RELY SMALLINT NOT NULL
+) ;
+
+ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+
+CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
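+-- KEY_CONSTRAINTS stores one row per key column: a two-column foreign key
+-- occupies POSITION 0 and POSITION 1 under the same CONSTRAINT_NAME, with the
+-- parent and child sides recorded via the *_CD_ID / *_TBL_ID / *_INTEGER_IDX
+-- columns. CONSTRAINT_TYPE distinguishes primary keys from foreign keys (the
+-- exact encoding comes from the metastore model, not from this script).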
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE nvarchar(128) NOT NULL,
+  WS_TABLE nvarchar(128) NOT NULL,
+  WS_PARTITION nvarchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
+
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.2.0', 'Hive release version 2.2.0');
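
The VERSION row written above is what the metastore's schema verification reads
at startup and what the upgrade scripts later bump. As an illustration only
(not part of the patch), a post-install check against this MSSQL schema might
look like the following, with the expected values taken straight from the
INSERT above:

    SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT
    FROM VERSION
    WHERE VER_ID = 1;
    -- expected after a clean 2.2.0 init: 1, '2.2.0', 'Hive release version 2.2.0'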

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
new file mode 100644
index 0000000..df97206
--- /dev/null
+++ b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
@@ -0,0 +1,4 @@
+SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;
+
+UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;
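
The script above contains no DDL: 2.1.0 to 2.2.0 changes nothing in the MSSQL
schema, so the upgrade only bumps the recorded version. A sketch of a
pre-flight guard an operator might run first (hypothetical, not part of the
upgrade file; it uses only the VERSION table defined by the schema script):

    IF NOT EXISTS (SELECT 1 FROM VERSION
                   WHERE VER_ID = 1 AND SCHEMA_VERSION = '2.1.0')
        RAISERROR ('Metastore schema is not at 2.1.0; do not run this upgrade.', 16, 1);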

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/mssql/upgrade.order.mssql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mssql/upgrade.order.mssql b/metastore/scripts/upgrade/mssql/upgrade.order.mssql
index b5eee93..6162140 100644
--- a/metastore/scripts/upgrade/mssql/upgrade.order.mssql
+++ b/metastore/scripts/upgrade/mssql/upgrade.order.mssql
@@ -5,3 +5,4 @@
 1.1.0-to-1.2.0
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
+2.1.0-to-2.2.0

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
new file mode 100644
index 0000000..91e221d
--- /dev/null
+++ b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
@@ -0,0 +1,851 @@
+-- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+--
+-- Host: localhost    Database: test
+-- ------------------------------------------------------
+-- Server version	5.5.25
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
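+-- Note on the /*!NNNNN ... */ wrappers used throughout this dump: these are
+-- MySQL version-conditional comments. A server at or above the embedded
+-- version (e.g. 40101 = 4.1.1) executes the enclosed statement, while older
+-- servers and other SQL engines skip it as an ordinary comment, keeping the
+-- dump loadable across versions.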
+
+--
+-- Table structure for table `BUCKETING_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `BUCKETING_COLS_N49` (`SD_ID`),
+  CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `CDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `CDS` (
+  `CD_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `COLUMNS_V2`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+  `CD_ID` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE_NAME` varchar(4000) DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+  KEY `COLUMNS_V2_N49` (`CD_ID`),
+  CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DATABASE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+  KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+  CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DBS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DBS` (
+  `DB_ID` bigint(20) NOT NULL,
+  `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_ID`),
+  UNIQUE KEY `UNIQUE_DATABASE` (`NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `DB_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+  `DB_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`DB_GRANT_ID`),
+  UNIQUE KEY `DBPRIVILEGEINDEX` (`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `DB_PRIVS_N49` (`DB_ID`),
+  CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `GLOBAL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+  `USER_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`USER_GRANT_ID`),
+  UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `IDXS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `IDXS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DEFERRED_REBUILD` bit(1) NOT NULL,
+  `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`),
+  UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+  KEY `IDXS_N51` (`SD_ID`),
+  KEY `IDXS_N50` (`INDEX_TBL_ID`),
+  KEY `IDXS_N49` (`ORIG_TBL_ID`),
+  CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `INDEX_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+  `INDEX_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+  KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+  CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `NUCLEUS_TABLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+  `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`CLASS_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITIONS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`),
+  UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+  KEY `PARTITIONS_N49` (`TBL_ID`),
+  KEY `PARTITIONS_N50` (`SD_ID`),
+  CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+  CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_EVENTS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+  `PART_NAME_ID` bigint(20) NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `EVENT_TIME` bigint(20) NOT NULL,
+  `EVENT_TYPE` int(11) NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_NAME_ID`),
+  KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEYS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+  KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+  CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_KEY_VALS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+  KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PARTITION_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+  `PART_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+  KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+  CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+  `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+  KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+  KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `PART_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+  `PART_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_ID` bigint(20) DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`PART_GRANT_ID`),
+  KEY `PARTPRIVILEGEINDEX` (`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `PART_PRIVS_N49` (`PART_ID`),
+  CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLES` (
+  `ROLE_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`ROLE_ID`),
+  UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `ROLE_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+  `ROLE_GRANT_ID` bigint(20) NOT NULL,
+  `ADD_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ROLE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`ROLE_GRANT_ID`),
+  UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `ROLE_MAP_N49` (`ROLE_ID`),
+  CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SDS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `CD_ID` bigint(20) DEFAULT NULL,
+  `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `IS_COMPRESSED` bit(1) NOT NULL,
+  `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `NUM_BUCKETS` int(11) NOT NULL,
+  `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SERDE_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`),
+  KEY `SDS_N49` (`SERDE_ID`),
+  KEY `SDS_N50` (`CD_ID`),
+  CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+  CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SD_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+  KEY `SD_PARAMS_N49` (`SD_ID`),
+  CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SEQUENCE_TABLE`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+  `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `NEXT_VAL` bigint(20) NOT NULL,
+  PRIMARY KEY (`SEQUENCE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDES` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SERDE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+  `SERDE_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+  KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+  CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_NAMES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+  `SD_ID` bigint(20) NOT NULL,
+  `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+  `SD_ID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+  `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+  KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_STRING_LIST_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+  `STRING_LIST_ID` bigint(20) NOT NULL,
+  `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+  KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SKEWED_VALUES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+  `SD_ID_OID` bigint(20) NOT NULL,
+  `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+  KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+  KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+  CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+  CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `SORT_COLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+  `SD_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `ORDER` int(11) NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+  KEY `SORT_COLS_N49` (`SD_ID`),
+  CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TABLE_PARAMS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+  KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+  CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBLS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBLS` (
+  `TBL_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `DB_ID` bigint(20) DEFAULT NULL,
+  `LAST_ACCESS_TIME` int(11) NOT NULL,
+  `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `RETENTION` int(11) NOT NULL,
+  `SD_ID` bigint(20) DEFAULT NULL,
+  `TBL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `VIEW_EXPANDED_TEXT` mediumtext,
+  `VIEW_ORIGINAL_TEXT` mediumtext,
+  PRIMARY KEY (`TBL_ID`),
+  UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+  KEY `TBLS_N50` (`SD_ID`),
+  KEY `TBLS_N49` (`DB_ID`),
+  CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+  CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_COL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+  `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+  `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+  KEY `TABLECOLUMNPRIVILEGEINDEX` (`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+  CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TBL_PRIVS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+  `TBL_GRANT_ID` bigint(20) NOT NULL,
+  `CREATE_TIME` int(11) NOT NULL,
+  `GRANT_OPTION` smallint(6) NOT NULL,
+  `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TBL_ID` bigint(20) DEFAULT NULL,
+  PRIMARY KEY (`TBL_GRANT_ID`),
+  KEY `TBL_PRIVS_N49` (`TBL_ID`),
+  KEY `TABLEPRIVILEGEINDEX` (`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+  CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TAB_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TBL_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table `PART_COL_STATS`
+--
+CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+ `CS_ID` bigint(20) NOT NULL,
+ `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_NAME` varchar(1000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+ `PART_ID` bigint(20) NOT NULL,
+ `LONG_LOW_VALUE` bigint(20),
+ `LONG_HIGH_VALUE` bigint(20),
+ `DOUBLE_HIGH_VALUE` double(53,4),
+ `DOUBLE_LOW_VALUE` double(53,4),
+ `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+ `NUM_NULLS` bigint(20) NOT NULL,
+ `NUM_DISTINCTS` bigint(20),
+ `AVG_COL_LEN` double(53,4),
+ `MAX_COL_LEN` bigint(20),
+ `NUM_TRUES` bigint(20),
+ `NUM_FALSES` bigint(20),
+ `LAST_ANALYZED` bigint(20) NOT NULL,
+  PRIMARY KEY (`CS_ID`),
+  CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
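+-- The composite index above appears to serve the usual partition-statistics
+-- lookup path (database, table, column, then partition name); USING BTREE is
+-- already the InnoDB default and is spelled out here for clarity.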
+
+--
+-- Table structure for table `TYPES`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPES` (
+  `TYPES_ID` bigint(20) NOT NULL,
+  `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  PRIMARY KEY (`TYPES_ID`),
+  UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `TYPE_FIELDS`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+  `TYPE_NAME` bigint(20) NOT NULL,
+  `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+  `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `INTEGER_IDX` int(11) NOT NULL,
+  PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+  KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+  CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+CREATE TABLE IF NOT EXISTS `MASTER_KEYS` 
+(
+    `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+    `MASTER_KEY` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`KEY_ID`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+(
+    `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+    `TOKEN` VARCHAR(767) BINARY NULL,
+    PRIMARY KEY (`TOKEN_IDENT`)
+) ENGINE=INNODB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE IF NOT EXISTS `VERSION` (
+  `VER_ID` BIGINT NOT NULL,
+  `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+  `VERSION_COMMENT` VARCHAR(255),
+  PRIMARY KEY (`VER_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNCS
+--
+CREATE TABLE IF NOT EXISTS `FUNCS` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `CREATE_TIME` INT(11) NOT NULL,
+  `DB_ID` BIGINT(20),
+  `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `FUNC_TYPE` INT(11) NOT NULL,
+  `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+  `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+  PRIMARY KEY (`FUNC_ID`),
+  UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+  KEY `FUNCS_N49` (`DB_ID`),
+  CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+--
+-- Table structure for table FUNC_RU
+--
+CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+  `FUNC_ID` BIGINT(20) NOT NULL,
+  `RESOURCE_TYPE` INT(11) NOT NULL,
+  `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `INTEGER_IDX` INT(11) NOT NULL,
+  PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+  CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
+(
+    `NL_ID` BIGINT(20) NOT NULL,
+    `EVENT_ID` BIGINT(20) NOT NULL,
+    `EVENT_TIME` INT(11) NOT NULL,
+    `EVENT_TYPE` varchar(32) NOT NULL,
+    `DB_NAME` varchar(128),
+    `TBL_NAME` varchar(128),
+    `MESSAGE` mediumtext,
+    PRIMARY KEY (`NL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
+(
+    `NNI_ID` BIGINT(20) NOT NULL,
+    `NEXT_EVENT_ID` BIGINT(20) NOT NULL,
+    PRIMARY KEY (`NNI_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
+(
+  `CHILD_CD_ID` BIGINT,
+  `CHILD_INTEGER_IDX` INT(11),
+  `CHILD_TBL_ID` BIGINT,
+  `PARENT_CD_ID` BIGINT NOT NULL,
+  `PARENT_INTEGER_IDX` INT(11) NOT NULL,
+  `PARENT_TBL_ID` BIGINT NOT NULL,
+  `POSITION` BIGINT NOT NULL,
+  `CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
+  `CONSTRAINT_TYPE` SMALLINT(6)  NOT NULL,
+  `UPDATE_RULE` SMALLINT(6),
+  `DELETE_RULE` SMALLINT(6),
+  `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
+  PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
+
+-- ----------------------------
+-- Transaction and Lock Tables
+-- ----------------------------
+SOURCE hive-txn-schema-2.2.0.mysql.sql;
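+-- SOURCE is a mysql client builtin, not server-side SQL, so this schema must
+-- be loaded through the mysql CLI (or a tool such as schematool) for the
+-- transaction and lock tables to be pulled in from the separate file.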
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.2.0', 'Hive release version 2.2.0');
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2012-08-23  0:56:31
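
One note before the companion transaction schema that follows: most metastore
object ids (TBL_ID, SD_ID, and so on) are handed out by DataNucleus from the
SEQUENCE_TABLE defined above rather than by AUTO_INCREMENT. A rough sketch of
that allocation pattern, assuming a class-keyed sequence name and a block size
of 5 (both illustrative, not taken from this patch):

    START TRANSACTION;
    SELECT NEXT_VAL FROM SEQUENCE_TABLE
      WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable'
      FOR UPDATE;                 -- lock the sequence row
    UPDATE SEQUENCE_TABLE SET NEXT_VAL = NEXT_VAL + 5
      WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MTable';
    COMMIT;                       -- ids NEXT_VAL .. NEXT_VAL + 4 are reserved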

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql
new file mode 100644
index 0000000..369d6bb
--- /dev/null
+++ b/metastore/scripts/upgrade/mysql/hive-txn-schema-2.2.0.mysql.sql
@@ -0,0 +1,131 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- Tables for transaction management
+-- 
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT int
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint NOT NULL,
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128) NOT NULL,
+  TC_PARTITION varchar(767),
+  TC_OPERATION_TYPE char(1) NOT NULL,
+  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint NOT NULL,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) not null,
+  HL_LOCK_TYPE char(1) not null,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT int,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO varbinary(2048),
+  CQ_HADOOP_JOB_ID varchar(32)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO varbinary(2048),
+  CC_HADOOP_JOB_ID varchar(32)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
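
NEXT_TXN_ID, NEXT_LOCK_ID and NEXT_COMPACTION_QUEUE_ID above are single-row
counter tables rather than AUTO_INCREMENT columns, which keeps id allocation
portable across every database the metastore supports. A sketch of how a batch
of transaction ids is typically handed out (the batch size of 3 is
illustrative):

    START TRANSACTION;
    SELECT NTXN_NEXT FROM NEXT_TXN_ID FOR UPDATE;      -- say this returns 42
    UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 3;
    COMMIT;                                            -- txn ids 42, 43, 44 are now reserved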

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
new file mode 100644
index 0000000..de38b58
--- /dev/null
+++ b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql
@@ -0,0 +1,5 @@
+SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' ';
+
+UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' ';
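+-- The AS ' ' alias simply blanks the column header when the mysql client
+-- prints these progress messages.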
+

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/mysql/upgrade.order.mysql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/mysql/upgrade.order.mysql b/metastore/scripts/upgrade/mysql/upgrade.order.mysql
index e0bb692..420174a 100644
--- a/metastore/scripts/upgrade/mysql/upgrade.order.mysql
+++ b/metastore/scripts/upgrade/mysql/upgrade.order.mysql
@@ -11,3 +11,4 @@
 1.1.0-to-1.2.0
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
+2.1.0-to-2.2.0


[28/66] [abbrv] hive git commit: Preparing for 2.2.0 development

Posted by sp...@apache.org.
Preparing for 2.2.0 development


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e22e3ad3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e22e3ad3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e22e3ad3

Branch: refs/heads/java8
Commit: e22e3ad3fe758eecc7b2d3163529f3c2e362c466
Parents: cd27454
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed May 25 10:21:48 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed May 25 10:46:11 2016 +0100

----------------------------------------------------------------------
 accumulo-handler/pom.xml                        |    2 +-
 ant/pom.xml                                     |    2 +-
 beeline/pom.xml                                 |    2 +-
 cli/pom.xml                                     |    2 +-
 common/pom.xml                                  |    2 +-
 contrib/pom.xml                                 |    2 +-
 hbase-handler/pom.xml                           |    2 +-
 hcatalog/core/pom.xml                           |    2 +-
 hcatalog/hcatalog-pig-adapter/pom.xml           |    2 +-
 hcatalog/pom.xml                                |    2 +-
 hcatalog/server-extensions/pom.xml              |    2 +-
 hcatalog/streaming/pom.xml                      |    2 +-
 hcatalog/webhcat/java-client/pom.xml            |    2 +-
 hcatalog/webhcat/svr/pom.xml                    |    2 +-
 hplsql/pom.xml                                  |    2 +-
 hwi/pom.xml                                     |    2 +-
 itests/custom-serde/pom.xml                     |    2 +-
 itests/custom-udfs/pom.xml                      |    2 +-
 itests/custom-udfs/udf-classloader-udf1/pom.xml |    2 +-
 itests/custom-udfs/udf-classloader-udf2/pom.xml |    2 +-
 itests/custom-udfs/udf-classloader-util/pom.xml |    2 +-
 itests/hcatalog-unit/pom.xml                    |    2 +-
 itests/hive-jmh/pom.xml                         |    2 +-
 itests/hive-minikdc/pom.xml                     |    2 +-
 itests/hive-unit-hadoop2/pom.xml                |    2 +-
 itests/hive-unit/pom.xml                        |    2 +-
 itests/pom.xml                                  |    2 +-
 itests/qtest-accumulo/pom.xml                   |    2 +-
 itests/qtest-spark/pom.xml                      |    2 +-
 itests/qtest/pom.xml                            |    2 +-
 itests/test-serde/pom.xml                       |    2 +-
 itests/util/pom.xml                             |    2 +-
 jdbc/pom.xml                                    |    2 +-
 llap-client/pom.xml                             |    2 +-
 llap-common/pom.xml                             |    2 +-
 llap-ext-client/pom.xml                         |    2 +-
 llap-server/pom.xml                             |    2 +-
 llap-tez/pom.xml                                |    2 +-
 metastore/pom.xml                               |    2 +-
 .../upgrade/derby/hive-schema-2.2.0.derby.sql   |  340 ++++
 .../derby/hive-txn-schema-2.2.0.derby.sql       |  130 ++
 .../derby/upgrade-2.1.0-to-2.2.0.derby.sql      |    3 +
 .../scripts/upgrade/derby/upgrade.order.derby   |    1 +
 .../upgrade/mssql/hive-schema-2.2.0.mssql.sql   | 1017 ++++++++++++
 .../mssql/upgrade-2.1.0-to-2.2.0.mssql.sql      |    4 +
 .../scripts/upgrade/mssql/upgrade.order.mssql   |    1 +
 .../upgrade/mysql/hive-schema-2.2.0.mysql.sql   |  851 ++++++++++
 .../mysql/hive-txn-schema-2.2.0.mysql.sql       |  131 ++
 .../mysql/upgrade-2.1.0-to-2.2.0.mysql.sql      |    5 +
 .../scripts/upgrade/mysql/upgrade.order.mysql   |    1 +
 .../upgrade/oracle/hive-schema-2.2.0.oracle.sql |  809 ++++++++++
 .../oracle/hive-txn-schema-2.2.0.oracle.sql     |  129 ++
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |    4 +
 .../scripts/upgrade/oracle/upgrade.order.oracle |    1 +
 .../postgres/hive-schema-2.2.0.postgres.sql     | 1476 ++++++++++++++++++
 .../postgres/hive-txn-schema-2.2.0.postgres.sql |  129 ++
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |    5 +
 .../upgrade/postgres/upgrade.order.postgres     |    1 +
 orc/pom.xml                                     |    2 +-
 packaging/pom.xml                               |    2 +-
 pom.xml                                         |    2 +-
 ql/pom.xml                                      |    2 +-
 serde/pom.xml                                   |    2 +-
 service-rpc/pom.xml                             |    2 +-
 service/pom.xml                                 |    2 +-
 shims/0.23/pom.xml                              |    2 +-
 shims/aggregator/pom.xml                        |    2 +-
 shims/common/pom.xml                            |    2 +-
 shims/pom.xml                                   |    2 +-
 shims/scheduler/pom.xml                         |    2 +-
 spark-client/pom.xml                            |    4 +-
 storage-api/pom.xml                             |    2 +-
 testutils/pom.xml                               |    2 +-
 73 files changed, 5093 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/accumulo-handler/pom.xml
----------------------------------------------------------------------
diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index 4b7e91f..d22a54c 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/ant/pom.xml
----------------------------------------------------------------------
diff --git a/ant/pom.xml b/ant/pom.xml
index 88dfc8e..6414ef6 100644
--- a/ant/pom.xml
+++ b/ant/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/beeline/pom.xml
----------------------------------------------------------------------
diff --git a/beeline/pom.xml b/beeline/pom.xml
index 8ac83f5..a720d08 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/cli/pom.xml
----------------------------------------------------------------------
diff --git a/cli/pom.xml b/cli/pom.xml
index 6f2e664..10fb1b9 100644
--- a/cli/pom.xml
+++ b/cli/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
index 9933072..b7244aa 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/contrib/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/pom.xml b/contrib/pom.xml
index 4fee665..6cc4931 100644
--- a/contrib/pom.xml
+++ b/contrib/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hbase-handler/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-handler/pom.xml b/hbase-handler/pom.xml
index 76ee8b1..8dc47e9 100644
--- a/hbase-handler/pom.xml
+++ b/hbase-handler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hcatalog/core/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/core/pom.xml b/hcatalog/core/pom.xml
index c9a6c01..506bf22 100644
--- a/hcatalog/core/pom.xml
+++ b/hcatalog/core/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hcatalog/hcatalog-pig-adapter/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/hcatalog-pig-adapter/pom.xml b/hcatalog/hcatalog-pig-adapter/pom.xml
index 5056ba8..9763d36 100644
--- a/hcatalog/hcatalog-pig-adapter/pom.xml
+++ b/hcatalog/hcatalog-pig-adapter/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hcatalog/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/pom.xml b/hcatalog/pom.xml
index e50125c..34de177 100644
--- a/hcatalog/pom.xml
+++ b/hcatalog/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hcatalog/server-extensions/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/server-extensions/pom.xml b/hcatalog/server-extensions/pom.xml
index 1a69169..7d34543 100644
--- a/hcatalog/server-extensions/pom.xml
+++ b/hcatalog/server-extensions/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hcatalog/streaming/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/pom.xml b/hcatalog/streaming/pom.xml
index ca29114..e765305 100644
--- a/hcatalog/streaming/pom.xml
+++ b/hcatalog/streaming/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hcatalog/webhcat/java-client/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/pom.xml b/hcatalog/webhcat/java-client/pom.xml
index 52d379d..3b53664 100644
--- a/hcatalog/webhcat/java-client/pom.xml
+++ b/hcatalog/webhcat/java-client/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hcatalog/webhcat/svr/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml
index 899b701..c5ad387 100644
--- a/hcatalog/webhcat/svr/pom.xml
+++ b/hcatalog/webhcat/svr/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive.hcatalog</groupId>
     <artifactId>hive-hcatalog</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hplsql/pom.xml
----------------------------------------------------------------------
diff --git a/hplsql/pom.xml b/hplsql/pom.xml
index d889fa7..d1337cb 100644
--- a/hplsql/pom.xml
+++ b/hplsql/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/hwi/pom.xml
----------------------------------------------------------------------
diff --git a/hwi/pom.xml b/hwi/pom.xml
index 31e790f..4e27be8 100644
--- a/hwi/pom.xml
+++ b/hwi/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/custom-serde/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-serde/pom.xml b/itests/custom-serde/pom.xml
index 0908562..166ffde 100644
--- a/itests/custom-serde/pom.xml
+++ b/itests/custom-serde/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/custom-udfs/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/pom.xml b/itests/custom-udfs/pom.xml
index 69e1f70..3e7443c 100644
--- a/itests/custom-udfs/pom.xml
+++ b/itests/custom-udfs/pom.xml
@@ -19,7 +19,7 @@ limitations under the License.
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/custom-udfs/udf-classloader-udf1/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/udf-classloader-udf1/pom.xml b/itests/custom-udfs/udf-classloader-udf1/pom.xml
index 5f35c63..0a95c94 100644
--- a/itests/custom-udfs/udf-classloader-udf1/pom.xml
+++ b/itests/custom-udfs/udf-classloader-udf1/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it-custom-udfs</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/custom-udfs/udf-classloader-udf2/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/udf-classloader-udf2/pom.xml b/itests/custom-udfs/udf-classloader-udf2/pom.xml
index 8edca63..e3f30f1 100644
--- a/itests/custom-udfs/udf-classloader-udf2/pom.xml
+++ b/itests/custom-udfs/udf-classloader-udf2/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it-custom-udfs</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/custom-udfs/udf-classloader-util/pom.xml
----------------------------------------------------------------------
diff --git a/itests/custom-udfs/udf-classloader-util/pom.xml b/itests/custom-udfs/udf-classloader-util/pom.xml
index b6475f5..fe285d7 100644
--- a/itests/custom-udfs/udf-classloader-util/pom.xml
+++ b/itests/custom-udfs/udf-classloader-util/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it-custom-udfs</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/hcatalog-unit/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/pom.xml b/itests/hcatalog-unit/pom.xml
index 0f19b7d..3ef87f9 100644
--- a/itests/hcatalog-unit/pom.xml
+++ b/itests/hcatalog-unit/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/hive-jmh/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-jmh/pom.xml b/itests/hive-jmh/pom.xml
index 411769d..f1417fd 100644
--- a/itests/hive-jmh/pom.xml
+++ b/itests/hive-jmh/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/hive-minikdc/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-minikdc/pom.xml b/itests/hive-minikdc/pom.xml
index 436cd70..dcc5c2d 100644
--- a/itests/hive-minikdc/pom.xml
+++ b/itests/hive-minikdc/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/hive-unit-hadoop2/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit-hadoop2/pom.xml b/itests/hive-unit-hadoop2/pom.xml
index 202cb43..fbf1c73 100644
--- a/itests/hive-unit-hadoop2/pom.xml
+++ b/itests/hive-unit-hadoop2/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/hive-unit/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index b248673..1a07651 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/pom.xml
----------------------------------------------------------------------
diff --git a/itests/pom.xml b/itests/pom.xml
index b68aa50..426ba04 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/qtest-accumulo/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml
index aafae8b..339c599 100644
--- a/itests/qtest-accumulo/pom.xml
+++ b/itests/qtest-accumulo/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/qtest-spark/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml
index c80fb04..3bc9e24 100644
--- a/itests/qtest-spark/pom.xml
+++ b/itests/qtest-spark/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index a479557..306a0ed 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/test-serde/pom.xml
----------------------------------------------------------------------
diff --git a/itests/test-serde/pom.xml b/itests/test-serde/pom.xml
index fbcd457..df0ce8a 100644
--- a/itests/test-serde/pom.xml
+++ b/itests/test-serde/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/itests/util/pom.xml
----------------------------------------------------------------------
diff --git a/itests/util/pom.xml b/itests/util/pom.xml
index 4789586..6d93dc1 100644
--- a/itests/util/pom.xml
+++ b/itests/util/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive-it</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/jdbc/pom.xml
----------------------------------------------------------------------
diff --git a/jdbc/pom.xml b/jdbc/pom.xml
index f87ab59..b4f3c7e 100644
--- a/jdbc/pom.xml
+++ b/jdbc/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/llap-client/pom.xml
----------------------------------------------------------------------
diff --git a/llap-client/pom.xml b/llap-client/pom.xml
index cbfdcd9..0243340 100644
--- a/llap-client/pom.xml
+++ b/llap-client/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/llap-common/pom.xml
----------------------------------------------------------------------
diff --git a/llap-common/pom.xml b/llap-common/pom.xml
index ceac83b..0e126f9 100644
--- a/llap-common/pom.xml
+++ b/llap-common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/llap-ext-client/pom.xml
----------------------------------------------------------------------
diff --git a/llap-ext-client/pom.xml b/llap-ext-client/pom.xml
index fdf16cd..5ba0ec5 100644
--- a/llap-ext-client/pom.xml
+++ b/llap-ext-client/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/llap-server/pom.xml
----------------------------------------------------------------------
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index 9de3443..22d17b7 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/llap-tez/pom.xml
----------------------------------------------------------------------
diff --git a/llap-tez/pom.xml b/llap-tez/pom.xml
index ce020da..c0fbe08 100644
--- a/llap-tez/pom.xml
+++ b/llap-tez/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/pom.xml
----------------------------------------------------------------------
diff --git a/metastore/pom.xml b/metastore/pom.xml
index 3827a51..7f75d4db 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
new file mode 100644
index 0000000..ae980e0
--- /dev/null
+++ b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
@@ -0,0 +1,340 @@
+-- Timestamp: 2011-09-22 15:32:02.024
+-- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+-- Specified schema is: APP
+-- appendLogs: false
+
+-- ----------------------------------------------
+-- DDL Statements for functions
+-- ----------------------------------------------
+
+CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+
+CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+
+-- ----------------------------------------------
+-- DDL Statements for tables
+-- ----------------------------------------------
+
+CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+
+CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(1000) NOT NULL, "TYPE_NAME" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+
+CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
+
+CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000));
+
+CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
+
+CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
+
+CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(128));
+
+CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
+
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR);
+
+CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(1000), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128));
+
+CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
+
+CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
+
+RUN '022-HIVE-11107.derby.sql';
+
+CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
+
+CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+
+CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
+
+CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
+
+CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
+
+CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(128) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(1000) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
+
+CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+
+CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+
+CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" LONG VARCHAR, "TBL_NAME" VARCHAR(128));
+
+CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
+
+CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER NOT NULL, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT NOT NULL, "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL,  "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL);
+
+ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION");
+
+CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
+
+-- ----------------------------------------------
+-- DDL Statements for indexes
+-- ----------------------------------------------
+
+CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
+
+CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
+
+CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME");
+
+CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
+
+CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
+
+CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
+
+CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
+
+CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
+
+-- ----------------------------------------------
+-- DDL Statements for keys
+-- ----------------------------------------------
+
+-- primary/unique
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
+
+ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
+
+ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
+
+ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
+
+ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
+
+ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
+
+ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+
+ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
+
+ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
+
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
+
+ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
+
+ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
+
+ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
+
+ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
+
+ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
+
+ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
+
+ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+
+ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+
+ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
+
+ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
+
+ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
+
+ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
+
+-- foreign
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
+
+ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+-- ----------------------------------------------
+-- DDL Statements for checks
+-- ----------------------------------------------
+
+ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
+
+ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
+
+-- ----------------------------
+-- Transaction and Lock Tables
+-- ----------------------------
+RUN 'hive-txn-schema-2.2.0.derby.sql';
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.2.0', 'Hive release version 2.2.0');
+

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql
new file mode 100644
index 0000000..11d86ca
--- /dev/null
+++ b/metastore/scripts/upgrade/derby/hive-txn-schema-2.2.0.derby.sql
@@ -0,0 +1,130 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- Tables for transaction management
+-- 
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT integer
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767),
+  TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT integer,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO varchar(2048) for bit data,
+  CQ_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO varchar(2048) for bit data,
+  CC_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+--1st 4 cols make up a PK but since WS_PARTITION is nullable we can't declare such a PK
+--This is a good candidate for an index-organized table
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
new file mode 100644
index 0000000..25a5e37
--- /dev/null
+++ b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
@@ -0,0 +1,3 @@
+-- Upgrade MetaStore schema from 2.1.0 to 2.2.0
+
+UPDATE "APP".VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/derby/upgrade.order.derby
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade.order.derby b/metastore/scripts/upgrade/derby/upgrade.order.derby
index e0bb692..420174a 100644
--- a/metastore/scripts/upgrade/derby/upgrade.order.derby
+++ b/metastore/scripts/upgrade/derby/upgrade.order.derby
@@ -11,3 +11,4 @@
 1.1.0-to-1.2.0
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
+2.1.0-to-2.2.0


[61/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9687dcc9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9687dcc9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9687dcc9

Branch: refs/heads/java8
Commit: 9687dcc94b2ced7755f5173b56113af906c43466
Parents: 8694435
Author: Mohit Sabharwal <mo...@cloudera.com>
Authored: Fri May 20 11:14:13 2016 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu May 26 10:39:18 2016 -0500

----------------------------------------------------------------------
 .../columnstats_partlvl_invalid_values.q        |    1 -
 .../clientpositive/authorization_explain.q      |    1 -
 ql/src/test/queries/clientpositive/avro_date.q  |    1 -
 .../clientpositive/avro_deserialize_map_null.q  |    1 -
 .../clientpositive/avro_nullable_fields.q       |    1 -
 .../queries/clientpositive/avro_timestamp.q     |    1 -
 .../clientpositive/cbo_rp_outer_join_ppr.q      |    1 -
 ql/src/test/queries/clientpositive/char_udf1.q  |    1 -
 ql/src/test/queries/clientpositive/input4.q     |    1 -
 ql/src/test/queries/clientpositive/join0.q      |    1 -
 .../queries/clientpositive/list_bucket_dml_10.q |    1 -
 .../queries/clientpositive/list_bucket_dml_11.q |    1 -
 .../queries/clientpositive/list_bucket_dml_12.q |    1 -
 .../queries/clientpositive/list_bucket_dml_13.q |    1 -
 .../queries/clientpositive/list_bucket_dml_2.q  |    1 -
 .../queries/clientpositive/list_bucket_dml_4.q  |    1 -
 .../queries/clientpositive/list_bucket_dml_5.q  |    1 -
 .../queries/clientpositive/list_bucket_dml_6.q  |    1 -
 .../queries/clientpositive/list_bucket_dml_8.q  |    1 -
 .../queries/clientpositive/list_bucket_dml_9.q  |    1 -
 .../queries/clientpositive/outer_join_ppr.q     |    1 -
 .../queries/clientpositive/parquet_map_null.q   |    1 -
 ql/src/test/queries/clientpositive/plan_json.q  |    1 -
 .../queries/clientpositive/stats_list_bucket.q  |    1 -
 ql/src/test/queries/clientpositive/str_to_map.q |    1 -
 .../clientpositive/subquery_multiinsert.q       |    1 -
 .../clientpositive/subquery_notin_having.q      |    1 -
 .../test/queries/clientpositive/varchar_udf1.q  |    1 -
 .../clientpositive/vector_cast_constant.q       |    1 -
 ...mnstats_partlvl_invalid_values.q.java1.7.out |   73 --
 ...mnstats_partlvl_invalid_values.q.java1.8.out |   73 --
 .../columnstats_partlvl_invalid_values.q.out    |   69 ++
 .../authorization_explain.q.java1.7.out         |   44 -
 .../authorization_explain.q.java1.8.out         |   47 -
 .../clientpositive/authorization_explain.q.out  |   40 +
 .../clientpositive/avro_date.q.java1.7.out      |  130 --
 .../clientpositive/avro_date.q.java1.8.out      |  130 --
 .../test/results/clientpositive/avro_date.q.out |  126 ++
 .../avro_deserialize_map_null.q.java1.7.out     |   57 -
 .../avro_deserialize_map_null.q.java1.8.out     |   57 -
 .../avro_deserialize_map_null.q.out             |   55 +
 .../avro_nullable_fields.q.java1.7.out          |  179 ---
 .../avro_nullable_fields.q.java1.8.out          |  179 ---
 .../clientpositive/avro_nullable_fields.q.out   |  177 +++
 .../clientpositive/avro_timestamp.q.java1.7.out |  134 ---
 .../clientpositive/avro_timestamp.q.java1.8.out |  134 ---
 .../results/clientpositive/avro_timestamp.q.out |  132 +++
 .../cbo_rp_outer_join_ppr.q.java1.7.out         |  693 -----------
 .../clientpositive/cbo_rp_outer_join_ppr.q.out  |  691 +++++++++++
 .../clientpositive/char_udf1.q.java1.7.out      |  463 --------
 .../clientpositive/char_udf1.q.java1.8.out      |  457 -------
 .../test/results/clientpositive/char_udf1.q.out |  459 +++++++
 .../results/clientpositive/input4.q.java1.7.out |  559 ---------
 .../results/clientpositive/input4.q.java1.8.out |  559 ---------
 ql/src/test/results/clientpositive/input4.q.out |  555 +++++++++
 .../results/clientpositive/join0.q.java1.7.out  |  240 ----
 .../results/clientpositive/join0.q.java1.8.out  |  240 ----
 ql/src/test/results/clientpositive/join0.q.out  |  238 ++++
 .../list_bucket_dml_10.q.java1.7.out            |  361 ------
 .../list_bucket_dml_10.q.java1.8.out            |  389 ------
 .../clientpositive/list_bucket_dml_10.q.out     |  359 ++++++
 .../list_bucket_dml_11.q.java1.7.out            |  329 -----
 .../list_bucket_dml_11.q.java1.8.out            |  424 -------
 .../clientpositive/list_bucket_dml_11.q.out     |  327 +++++
 .../list_bucket_dml_12.q.java1.7.out            |  426 -------
 .../list_bucket_dml_12.q.java1.8.out            |  596 ----------
 .../clientpositive/list_bucket_dml_12.q.out     |  424 +++++++
 .../list_bucket_dml_13.q.java1.7.out            |  337 ------
 .../list_bucket_dml_13.q.java1.8.out            |  439 -------
 .../clientpositive/list_bucket_dml_13.q.out     |  335 ++++++
 .../list_bucket_dml_2.q.java1.7.out             |  591 ---------
 .../list_bucket_dml_2.q.java1.8.out             |  692 -----------
 .../clientpositive/list_bucket_dml_2.q.out      |  589 +++++++++
 .../list_bucket_dml_4.q.java1.7.out             |  813 -------------
 .../list_bucket_dml_4.q.java1.8.out             |  915 --------------
 .../clientpositive/list_bucket_dml_4.q.out      |  811 +++++++++++++
 .../list_bucket_dml_5.q.java1.7.out             |  506 --------
 .../list_bucket_dml_5.q.java1.8.out             |  617 ----------
 .../clientpositive/list_bucket_dml_5.q.out      |  504 ++++++++
 .../list_bucket_dml_6.q.java1.7.out             | 1007 ----------------
 .../list_bucket_dml_6.q.java1.8.out             | 1119 ------------------
 .../clientpositive/list_bucket_dml_6.q.out      | 1005 ++++++++++++++++
 .../list_bucket_dml_8.q.java1.7.out             |  641 ----------
 .../list_bucket_dml_8.q.java1.8.out             |  712 -----------
 .../clientpositive/list_bucket_dml_8.q.out      |  639 ++++++++++
 .../list_bucket_dml_9.q.java1.7.out             |  813 -------------
 .../list_bucket_dml_9.q.java1.8.out             |  915 --------------
 .../clientpositive/list_bucket_dml_9.q.out      |  811 +++++++++++++
 .../clientpositive/llap/join0.q.java1.7.out     |  242 ----
 .../clientpositive/llap/join0.q.java1.8.out     |  242 ----
 .../results/clientpositive/llap/join0.q.out     |  243 ++++
 .../llap/vector_cast_constant.q.java1.7.out     |  217 ----
 .../llap/vector_cast_constant.q.java1.8.out     |  217 ----
 .../llap/vector_cast_constant.q.out             |  216 ++++
 .../clientpositive/outer_join_ppr.q.java1.7.out |  685 -----------
 .../clientpositive/outer_join_ppr.q.java1.8.out |  855 -------------
 .../results/clientpositive/outer_join_ppr.q.out |  683 +++++++++++
 .../parquet_map_null.q.java1.7.out              |   70 --
 .../parquet_map_null.q.java1.8.out              |   70 --
 .../clientpositive/parquet_map_null.q.out       |   68 ++
 .../clientpositive/plan_json.q.java1.7.out      |   13 -
 .../clientpositive/plan_json.q.java1.8.out      |   13 -
 .../test/results/clientpositive/plan_json.q.out |   11 +
 .../clientpositive/spark/join0.q.java1.7.out    |  238 ----
 .../clientpositive/spark/join0.q.java1.8.out    |  238 ----
 .../results/clientpositive/spark/join0.q.out    |   20 +-
 .../spark/list_bucket_dml_10.q.java1.7.out      |  252 ----
 .../spark/list_bucket_dml_10.q.java1.8.out      |  280 -----
 .../spark/list_bucket_dml_10.q.out              |  250 ++++
 .../spark/list_bucket_dml_2.q.java1.7.out       |  591 ---------
 .../spark/list_bucket_dml_2.q.java1.8.out       |  663 -----------
 .../spark/list_bucket_dml_2.q.out               |  Bin 28667 -> 27128 bytes
 .../spark/outer_join_ppr.q.java1.7.out          |  709 -----------
 .../spark/outer_join_ppr.q.java1.8.out          |  879 --------------
 .../clientpositive/spark/outer_join_ppr.q.out   |  490 ++------
 .../spark/subquery_multiinsert.q.java1.7.out    |  886 --------------
 .../spark/subquery_multiinsert.q.java1.8.out    |  890 --------------
 .../spark/subquery_multiinsert.q.out            |   56 +-
 .../spark/vector_cast_constant.q.java1.7.out    |  217 ----
 .../spark/vector_cast_constant.q.java1.8.out    |  203 ----
 .../spark/vector_cast_constant.q.out            |   54 +-
 .../stats_list_bucket.q.java1.7.out             |  191 ---
 .../stats_list_bucket.q.java1.8.out             |  193 ---
 .../clientpositive/stats_list_bucket.q.out      |  189 +++
 .../clientpositive/str_to_map.q.java1.7.out     |  220 ----
 .../clientpositive/str_to_map.q.java1.8.out     |  219 ----
 .../results/clientpositive/str_to_map.q.out     |  216 ++++
 .../subquery_multiinsert.q.java1.7.out          |  999 ----------------
 .../subquery_multiinsert.q.java1.8.out          |  999 ----------------
 .../clientpositive/subquery_multiinsert.q.out   |  997 ++++++++++++++++
 .../subquery_notin_having.q.java1.7.out         |  766 ------------
 .../subquery_notin_having.q.java1.8.out         |  762 ------------
 .../clientpositive/subquery_notin_having.q.out  |  764 ++++++++++++
 .../clientpositive/tez/join0.q.java1.7.out      |  239 ----
 .../clientpositive/tez/join0.q.java1.8.out      |  236 ----
 .../test/results/clientpositive/tez/join0.q.out |  237 ++++
 .../tez/vector_cast_constant.q.java1.7.out      |  218 ----
 .../tez/vector_cast_constant.q.java1.8.out      |  216 ----
 .../tez/vector_cast_constant.q.out              |  214 ++++
 .../clientpositive/varchar_udf1.q.java1.7.out   |  457 -------
 .../clientpositive/varchar_udf1.q.java1.8.out   |  457 -------
 .../results/clientpositive/varchar_udf1.q.out   |  453 +++++++
 .../vector_cast_constant.q.java1.7.out          |  220 ----
 .../vector_cast_constant.q.java1.8.out          |  197 ---
 .../clientpositive/vector_cast_constant.q.out   |   53 +-
 145 files changed, 13120 insertions(+), 32818 deletions(-)
----------------------------------------------------------------------
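
Note: the per-JDK golden files removed below differ only in entry ordering of
printed maps — compare the two FAILED SemanticException lines
({employeesalary=4000.0, country=Canada} in the java1.7 output vs.
{country=Canada, employeesalary=4000.0} in the java1.8 output), or the Avro
map column rows ({"foo":...,"bar":...} vs. {"bar":...,"foo":...}). A minimal
standalone Java sketch of the underlying cause (not part of this commit,
assuming only stock java.util.HashMap behaviour):

    import java.util.HashMap;
    import java.util.Map;

    public class MapOrderDemo {
        public static void main(String[] args) {
            // HashMap iteration order is unspecified and changed between
            // JDK 7 and JDK 8, so any test output that serializes map
            // entries (Avro map columns, partition-spec error messages)
            // could legitimately print in a different order per JDK.
            Map<String, String> m = new HashMap<>();
            m.put("employeesalary", "4000.0");
            m.put("country", "Canada");
            // May print {employeesalary=4000.0, country=Canada} on one JDK
            // and {country=Canada, employeesalary=4000.0} on another; both
            // are valid, which is why per-JDK .q.out files existed at all.
            System.out.println(m);
        }
    }
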


http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q b/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q
index 712ece7..8521631 100644
--- a/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q
+++ b/ql/src/test/queries/clientnegative/columnstats_partlvl_invalid_values.q
@@ -1,4 +1,3 @@
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 DROP TABLE Employee_Part;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/authorization_explain.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_explain.q b/ql/src/test/queries/clientpositive/authorization_explain.q
index 6a9475c..d429704 100644
--- a/ql/src/test/queries/clientpositive/authorization_explain.q
+++ b/ql/src/test/queries/clientpositive/authorization_explain.q
@@ -2,7 +2,6 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho
 set hive.mapred.mode=nonstrict;
 set hive.security.authorization.enabled=true;
 
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 explain authorization select * from src join srcpart;
 explain formatted authorization select * from src join srcpart;

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/avro_date.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/avro_date.q b/ql/src/test/queries/clientpositive/avro_date.q
index 15c07de..7169822 100644
--- a/ql/src/test/queries/clientpositive/avro_date.q
+++ b/ql/src/test/queries/clientpositive/avro_date.q
@@ -1,5 +1,4 @@
 set hive.mapred.mode=nonstrict;
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 DROP TABLE avro_date_staging;
 DROP TABLE avro_date;

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/avro_deserialize_map_null.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/avro_deserialize_map_null.q b/ql/src/test/queries/clientpositive/avro_deserialize_map_null.q
index 962e649..42258d9 100644
--- a/ql/src/test/queries/clientpositive/avro_deserialize_map_null.q
+++ b/ql/src/test/queries/clientpositive/avro_deserialize_map_null.q
@@ -4,7 +4,6 @@
 -- fileSchema   = [{ "type" : "map", "values" : ["string","null"]}, "null"]
 -- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}]
 
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 DROP TABLE IF EXISTS avro_table;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/avro_nullable_fields.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/avro_nullable_fields.q b/ql/src/test/queries/clientpositive/avro_nullable_fields.q
index 9ba7441..cb398d6 100644
--- a/ql/src/test/queries/clientpositive/avro_nullable_fields.q
+++ b/ql/src/test/queries/clientpositive/avro_nullable_fields.q
@@ -1,6 +1,5 @@
 -- Verify that nullable fields properly work
 
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 CREATE TABLE test_serializer(string1 STRING,
                              int1 INT,

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/avro_timestamp.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/avro_timestamp.q b/ql/src/test/queries/clientpositive/avro_timestamp.q
index 7bf0dc8..847f250 100644
--- a/ql/src/test/queries/clientpositive/avro_timestamp.q
+++ b/ql/src/test/queries/clientpositive/avro_timestamp.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 -- Exclude test on Windows due to space character being escaped in Hive paths on Windows.
 -- EXCLUDE_OS_WINDOWS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 DROP TABLE avro_timestamp_staging;
 DROP TABLE avro_timestamp;

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/cbo_rp_outer_join_ppr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/cbo_rp_outer_join_ppr.q b/ql/src/test/queries/clientpositive/cbo_rp_outer_join_ppr.q
index c497ce9..d8f726e 100644
--- a/ql/src/test/queries/clientpositive/cbo_rp_outer_join_ppr.q
+++ b/ql/src/test/queries/clientpositive/cbo_rp_outer_join_ppr.q
@@ -4,7 +4,6 @@ set hive.cbo.returnpath.hiveop=true;
 set hive.optimize.ppd=true;
 
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 EXPLAIN EXTENDED
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/char_udf1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/char_udf1.q b/ql/src/test/queries/clientpositive/char_udf1.q
index 09012b4..39aa0e0 100644
--- a/ql/src/test/queries/clientpositive/char_udf1.q
+++ b/ql/src/test/queries/clientpositive/char_udf1.q
@@ -4,7 +4,6 @@ create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20));
 insert overwrite table char_udf_1
   select key, value, key, value from src where key = '238' limit 1;
 
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- UDFs with char support
 select 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/input4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/input4.q b/ql/src/test/queries/clientpositive/input4.q
index 83edbe2..90fcbdd 100644
--- a/ql/src/test/queries/clientpositive/input4.q
+++ b/ql/src/test/queries/clientpositive/input4.q
@@ -1,4 +1,3 @@
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE;
 EXPLAIN

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/join0.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join0.q b/ql/src/test/queries/clientpositive/join0.q
index 66f2ef3..3252847 100644
--- a/ql/src/test/queries/clientpositive/join0.q
+++ b/ql/src/test/queries/clientpositive/join0.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
--- JAVA_VERSION_SPECIFIC_OUTPUT
 -- SORT_QUERY_RESULTS
 
 EXPLAIN

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_10.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_10.q b/ql/src/test/queries/clientpositive/list_bucket_dml_10.q
index 5d3dade..f25c174 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_10.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_10.q
@@ -1,7 +1,6 @@
 set mapred.input.dir.recursive=true;
 
 -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- list bucketing DML: static partition. multiple skewed columns.
 -- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_11.q b/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
index 2d22d66..8ac1627 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_11.q
@@ -6,7 +6,6 @@ set hive.merge.mapredfiles=false;
 -- Ensure it works if skewed column is not the first column in the table columns
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- list bucketing DML: static partition. multiple skewed columns.
 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_12.q b/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
index ac063cc..9facfa5 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_12.q
@@ -7,7 +7,6 @@ set hive.merge.mapredfiles=false;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
 create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_13.q b/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
index d68ca93..0fe7f61 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_13.q
@@ -7,7 +7,6 @@ set hive.merge.mapredfiles=false;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
 create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_2.q b/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
index 263a002..c6dceab 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_2.q
@@ -10,7 +10,6 @@ set hive.stats.reliable=true;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- list bucketing DML: static partition. multiple skewed columns.
 -- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_4.q b/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
index 86ff342..950409d 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_4.q
@@ -9,7 +9,6 @@ set hive.merge.mapredfiles=false;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- list bucketing DML: static partition. multiple skewed columns. merge.
 -- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_5.q b/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
index ace7ba9..fce8e2e 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_5.q
@@ -10,7 +10,6 @@ set mapred.input.dir.recursive=true;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- create a skewed table
 create table list_bucketing_dynamic_part (key String, value String) 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_6.q b/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
index 5684788..631c938 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_6.q
@@ -47,7 +47,6 @@ set hive.merge.mapredfiles=false;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- create a skewed table
 create table list_bucketing_dynamic_part (key String, value String) 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
index d904543..6d73896 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_8.q
@@ -48,7 +48,6 @@ set hive.merge.mapredfiles=false;
 -- 118 000002_0 
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- create a skewed table
 create table list_bucketing_dynamic_part (key String, value String) 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
index 620750c..d2e24af 100644
--- a/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
+++ b/ql/src/test/queries/clientpositive/list_bucket_dml_9.q
@@ -9,7 +9,6 @@ set hive.merge.mapredfiles=false;
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- list bucketing DML: static partition. multiple skewed columns. merge.
 -- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/outer_join_ppr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/outer_join_ppr.q b/ql/src/test/queries/clientpositive/outer_join_ppr.q
index 497a4d1..60a06ae 100644
--- a/ql/src/test/queries/clientpositive/outer_join_ppr.q
+++ b/ql/src/test/queries/clientpositive/outer_join_ppr.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.optimize.ppd=true;
 
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 EXPLAIN EXTENDED
  FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/parquet_map_null.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/parquet_map_null.q b/ql/src/test/queries/clientpositive/parquet_map_null.q
index 61058f3..e154159 100644
--- a/ql/src/test/queries/clientpositive/parquet_map_null.q
+++ b/ql/src/test/queries/clientpositive/parquet_map_null.q
@@ -1,5 +1,4 @@
 -- This test attempts to write a parquet table from an avro table that contains map null values
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 DROP TABLE IF EXISTS avro_table;
 DROP TABLE IF EXISTS parquet_table;

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/plan_json.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/plan_json.q b/ql/src/test/queries/clientpositive/plan_json.q
index aa2b134..503b55d 100644
--- a/ql/src/test/queries/clientpositive/plan_json.q
+++ b/ql/src/test/queries/clientpositive/plan_json.q
@@ -1,5 +1,4 @@
 -- explain plan json:  the query gets the formatted json output of the query plan of the hive query
 
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 EXPLAIN FORMATTED SELECT count(1) FROM src;

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/stats_list_bucket.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_list_bucket.q b/ql/src/test/queries/clientpositive/stats_list_bucket.q
index 51137a8..536702c 100644
--- a/ql/src/test/queries/clientpositive/stats_list_bucket.q
+++ b/ql/src/test/queries/clientpositive/stats_list_bucket.q
@@ -1,6 +1,5 @@
 
 -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 drop table stats_list_bucket;
 drop table stats_list_bucket_1;

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/str_to_map.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/str_to_map.q b/ql/src/test/queries/clientpositive/str_to_map.q
index f2993b1..3280d89 100644
--- a/ql/src/test/queries/clientpositive/str_to_map.q
+++ b/ql/src/test/queries/clientpositive/str_to_map.q
@@ -1,7 +1,6 @@
 set hive.mapred.mode=nonstrict;
 set hive.fetch.task.conversion=more;
 
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 desc function str_to_map;
 desc function extended str_to_map;

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/subquery_multiinsert.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_multiinsert.q b/ql/src/test/queries/clientpositive/subquery_multiinsert.q
index bea2e13..9d70f51 100644
--- a/ql/src/test/queries/clientpositive/subquery_multiinsert.q
+++ b/ql/src/test/queries/clientpositive/subquery_multiinsert.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecutePrinter,org.apache.hadoop.hive.ql.hooks.PrintCompletedTasksHook;
 
 -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 CREATE TABLE src_4(
   key STRING, 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/subquery_notin_having.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_notin_having.q b/ql/src/test/queries/clientpositive/subquery_notin_having.q
index 8b2914d..05148df 100644
--- a/ql/src/test/queries/clientpositive/subquery_notin_having.q
+++ b/ql/src/test/queries/clientpositive/subquery_notin_having.q
@@ -1,6 +1,5 @@
 set hive.mapred.mode=nonstrict;
 -- non agg, non corr
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 explain
 select key, count(*) 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/varchar_udf1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/varchar_udf1.q b/ql/src/test/queries/clientpositive/varchar_udf1.q
index ff40b31..4d1f884 100644
--- a/ql/src/test/queries/clientpositive/varchar_udf1.q
+++ b/ql/src/test/queries/clientpositive/varchar_udf1.q
@@ -4,7 +4,6 @@ create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)
 insert overwrite table varchar_udf_1
   select key, value, key, value from src where key = '238' limit 1;
 
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 -- UDFs with varchar support
 select 

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/queries/clientpositive/vector_cast_constant.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_cast_constant.q b/ql/src/test/queries/clientpositive/vector_cast_constant.q
index c50dd8f..94bee09 100644
--- a/ql/src/test/queries/clientpositive/vector_cast_constant.q
+++ b/ql/src/test/queries/clientpositive/vector_cast_constant.q
@@ -2,7 +2,6 @@ set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 SET hive.vectorized.execution.enabled=true;
 
--- JAVA_VERSION_SPECIFIC_OUTPUT
 
 DROP TABLE over1k;
 DROP TABLE over1korc;

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.java1.7.out b/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.java1.7.out
deleted file mode 100644
index 4ea70e3..0000000
--- a/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.java1.7.out
+++ /dev/null
@@ -1,73 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE Employee_Part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE Employee_Part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
-row format delimited fields terminated by '|'  stored as textfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@Employee_Part
-POSTHOOK: query: CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
-row format delimited fields terminated by '|'  stored as textfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@Employee_Part
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK
-FAILED: SemanticException [Error 30007]: Invalid partitioning key/value specified in ANALYZE statement : {employeesalary=4000.0, country=Canada}

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.java1.8.out b/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.java1.8.out
deleted file mode 100644
index 7cae55e..0000000
--- a/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.java1.8.out
+++ /dev/null
@@ -1,73 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE Employee_Part
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE Employee_Part
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
-row format delimited fields terminated by '|'  stored as textfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@Employee_Part
-POSTHOOK: query: CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
-row format delimited fields terminated by '|'  stored as textfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@Employee_Part
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK
-PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK')
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@employee_part
-POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK')
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@employee_part
-POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK
-FAILED: SemanticException [Error 30007]: Invalid partitioning key/value specified in ANALYZE statement : {country=Canada, employeesalary=4000.0}

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.out b/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.out
new file mode 100644
index 0000000..3261f78
--- /dev/null
+++ b/ql/src/test/results/clientnegative/columnstats_partlvl_invalid_values.q.out
@@ -0,0 +1,69 @@
+PREHOOK: query: DROP TABLE Employee_Part
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE Employee_Part
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
+row format delimited fields terminated by '|'  stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@Employee_Part
+POSTHOOK: query: CREATE TABLE Employee_Part(employeeID int, employeeName String) partitioned by (employeeSalary double, country string)
+row format delimited fields terminated by '|'  stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@Employee_Part
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@employee_part
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='USA')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=USA
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@employee_part
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='2000.0', country='UK')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=2000.0/country=UK
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@employee_part
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='USA')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=USA
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@employee_part
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='4000.0', country='USA')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=4000.0/country=USA
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@employee_part
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3500.0', country='UK')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=3500.0/country=UK
+PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@employee_part
+POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/employee2.dat" INTO TABLE Employee_Part partition(employeeSalary='3000.0', country='UK')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@employee_part
+POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK
+FAILED: SemanticException [Error 30007]: Invalid partitioning key/value specified in ANALYZE statement : {employeesalary=4000.0, country=Canada}

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out b/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
deleted file mode 100644
index fefb50c..0000000
--- a/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
+++ /dev/null
@@ -1,44 +0,0 @@
-Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain authorization select * from src join srcpart
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain authorization select * from src join srcpart
-POSTHOOK: type: QUERY
-INPUTS: 
-  default@src
-  default@srcpart
-  default@srcpart@ds=2008-04-08/hr=11
-  default@srcpart@ds=2008-04-08/hr=12
-  default@srcpart@ds=2008-04-09/hr=11
-  default@srcpart@ds=2008-04-09/hr=12
-OUTPUTS: 
-#### A masked pattern was here ####
-CURRENT_USER: 
-  hive_test_user
-OPERATION: 
-  QUERY
-Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: explain formatted authorization select * from src join srcpart
-PREHOOK: type: QUERY
-POSTHOOK: query: explain formatted authorization select * from src join srcpart
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-PREHOOK: query: explain authorization use default
-PREHOOK: type: SWITCHDATABASE
-POSTHOOK: query: explain authorization use default
-POSTHOOK: type: SWITCHDATABASE
-INPUTS: 
-  database:default
-OUTPUTS: 
-CURRENT_USER: 
-  hive_test_user
-OPERATION: 
-  SWITCHDATABASE
-PREHOOK: query: explain formatted authorization use default
-PREHOOK: type: SWITCHDATABASE
-POSTHOOK: query: explain formatted authorization use default
-POSTHOOK: type: SWITCHDATABASE
-{"INPUTS":["database:default"],"OUTPUTS":[],"CURRENT_USER":"hive_test_user","OPERATION":"SWITCHDATABASE"}

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out b/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out
deleted file mode 100644
index b7ec209..0000000
--- a/ql/src/test/results/clientpositive/authorization_explain.q.java1.8.out
+++ /dev/null
@@ -1,47 +0,0 @@
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain authorization select * from src join srcpart
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain authorization select * from src join srcpart
-POSTHOOK: type: QUERY
-INPUTS: 
-  default@src
-  default@srcpart
-  default@srcpart@ds=2008-04-08/hr=11
-  default@srcpart@ds=2008-04-08/hr=12
-  default@srcpart@ds=2008-04-09/hr=11
-  default@srcpart@ds=2008-04-09/hr=12
-OUTPUTS: 
-#### A masked pattern was here ####
-CURRENT_USER: 
-  hive_test_user
-OPERATION: 
-  QUERY
-AUTHORIZATION_FAILURES: 
-  No privilege 'Select' found for inputs { database:default, table:src, columnName:key}
-  No privilege 'Select' found for inputs { database:default, table:srcpart, columnName:key}
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: explain formatted authorization select * from src join srcpart
-PREHOOK: type: QUERY
-POSTHOOK: query: explain formatted authorization select * from src join srcpart
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-PREHOOK: query: explain authorization use default
-PREHOOK: type: SWITCHDATABASE
-POSTHOOK: query: explain authorization use default
-POSTHOOK: type: SWITCHDATABASE
-INPUTS: 
-  database:default
-OUTPUTS: 
-CURRENT_USER: 
-  hive_test_user
-OPERATION: 
-  SWITCHDATABASE
-PREHOOK: query: explain formatted authorization use default
-PREHOOK: type: SWITCHDATABASE
-POSTHOOK: query: explain formatted authorization use default
-POSTHOOK: type: SWITCHDATABASE
-{"INPUTS":["database:default"],"OUTPUTS":[],"CURRENT_USER":"hive_test_user","OPERATION":"SWITCHDATABASE"}

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/authorization_explain.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_explain.q.out b/ql/src/test/results/clientpositive/authorization_explain.q.out
new file mode 100644
index 0000000..851b845
--- /dev/null
+++ b/ql/src/test/results/clientpositive/authorization_explain.q.out
@@ -0,0 +1,40 @@
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: explain authorization select * from src join srcpart
+PREHOOK: type: QUERY
+POSTHOOK: query: explain authorization select * from src join srcpart
+POSTHOOK: type: QUERY
+INPUTS: 
+  default@src
+  default@srcpart
+  default@srcpart@ds=2008-04-08/hr=11
+  default@srcpart@ds=2008-04-08/hr=12
+  default@srcpart@ds=2008-04-09/hr=11
+  default@srcpart@ds=2008-04-09/hr=12
+OUTPUTS: 
+#### A masked pattern was here ####
+CURRENT_USER: 
+  hive_test_user
+OPERATION: 
+  QUERY
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: explain formatted authorization select * from src join srcpart
+PREHOOK: type: QUERY
+POSTHOOK: query: explain formatted authorization select * from src join srcpart
+POSTHOOK: type: QUERY
+#### A masked pattern was here ####
+PREHOOK: query: explain authorization use default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: explain authorization use default
+POSTHOOK: type: SWITCHDATABASE
+INPUTS: 
+  database:default
+OUTPUTS: 
+CURRENT_USER: 
+  hive_test_user
+OPERATION: 
+  SWITCHDATABASE
+PREHOOK: query: explain formatted authorization use default
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: explain formatted authorization use default
+POSTHOOK: type: SWITCHDATABASE
+{"INPUTS":["database:default"],"OUTPUTS":[],"CURRENT_USER":"hive_test_user","OPERATION":"SWITCHDATABASE"}

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_date.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_date.q.java1.7.out b/ql/src/test/results/clientpositive/avro_date.q.java1.7.out
deleted file mode 100644
index 501b983..0000000
--- a/ql/src/test/results/clientpositive/avro_date.q.java1.7.out
+++ /dev/null
@@ -1,130 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_date_staging
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_date_staging
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_date
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_date
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_date_casts
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_date_casts
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_date_staging (d date, m1 map<string, date>, l1 array<date>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_date_staging
-POSTHOOK: query: CREATE TABLE avro_date_staging (d date, m1 map<string, date>, l1 array<date>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_date_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_date.txt' OVERWRITE INTO TABLE avro_date_staging
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_date_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_date.txt' OVERWRITE INTO TABLE avro_date_staging
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_date_staging
-PREHOOK: query: CREATE TABLE avro_date (d date, m1 map<string, date>, l1 array<date>) 
-  PARTITIONED BY (p1 int, p2 date) 
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' 
-  STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_date
-POSTHOOK: query: CREATE TABLE avro_date (d date, m1 map<string, date>, l1 array<date>) 
-  PARTITIONED BY (p1 int, p2 date) 
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' 
-  STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_date
-PREHOOK: query: INSERT OVERWRITE TABLE avro_date PARTITION(p1=2, p2='2014-09-26') SELECT * FROM avro_date_staging
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date_staging
-PREHOOK: Output: default@avro_date@p1=2/p2=2014-09-26
-POSTHOOK: query: INSERT OVERWRITE TABLE avro_date PARTITION(p1=2, p2='2014-09-26') SELECT * FROM avro_date_staging
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date_staging
-POSTHOOK: Output: default@avro_date@p1=2/p2=2014-09-26
-POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).d SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:d, type:date, comment:null), ]
-POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).l1 SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:l1, type:array<date>, comment:null), ]
-POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).m1 SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:m1, type:map<string,date>, comment:null), ]
-PREHOOK: query: SELECT * FROM avro_date
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_date
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-2012-02-21	{"foo":"1980-12-16","bar":"1998-05-07"}	["2011-09-04","2011-09-05"]	2	2014-09-26
-2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
-1947-02-11	{"baz":"1921-12-16"}	["2011-09-05"]	2	2014-09-26
-8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26
-PREHOOK: query: SELECT d, COUNT(d) FROM avro_date GROUP BY d
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT d, COUNT(d) FROM avro_date GROUP BY d
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-1947-02-11	1
-2012-02-21	1
-2014-02-11	1
-8200-02-11	1
-PREHOOK: query: SELECT * FROM avro_date WHERE d!='1947-02-11'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_date WHERE d!='1947-02-11'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-2012-02-21	{"foo":"1980-12-16","bar":"1998-05-07"}	["2011-09-04","2011-09-05"]	2	2014-09-26
-2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
-8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26
-PREHOOK: query: SELECT * FROM avro_date WHERE d<'2014-12-21'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_date WHERE d<'2014-12-21'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-2012-02-21	{"foo":"1980-12-16","bar":"1998-05-07"}	["2011-09-04","2011-09-05"]	2	2014-09-26
-2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
-1947-02-11	{"baz":"1921-12-16"}	["2011-09-05"]	2	2014-09-26
-PREHOOK: query: SELECT * FROM avro_date WHERE d>'8000-12-01'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_date WHERE d>'8000-12-01'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_date.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_date.q.java1.8.out b/ql/src/test/results/clientpositive/avro_date.q.java1.8.out
deleted file mode 100644
index dea51c6..0000000
--- a/ql/src/test/results/clientpositive/avro_date.q.java1.8.out
+++ /dev/null
@@ -1,130 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_date_staging
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_date_staging
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_date
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_date
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_date_casts
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_date_casts
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_date_staging (d date, m1 map<string, date>, l1 array<date>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_date_staging
-POSTHOOK: query: CREATE TABLE avro_date_staging (d date, m1 map<string, date>, l1 array<date>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_date_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_date.txt' OVERWRITE INTO TABLE avro_date_staging
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_date_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_date.txt' OVERWRITE INTO TABLE avro_date_staging
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_date_staging
-PREHOOK: query: CREATE TABLE avro_date (d date, m1 map<string, date>, l1 array<date>) 
-  PARTITIONED BY (p1 int, p2 date) 
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' 
-  STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_date
-POSTHOOK: query: CREATE TABLE avro_date (d date, m1 map<string, date>, l1 array<date>) 
-  PARTITIONED BY (p1 int, p2 date) 
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' 
-  STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_date
-PREHOOK: query: INSERT OVERWRITE TABLE avro_date PARTITION(p1=2, p2='2014-09-26') SELECT * FROM avro_date_staging
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date_staging
-PREHOOK: Output: default@avro_date@p1=2/p2=2014-09-26
-POSTHOOK: query: INSERT OVERWRITE TABLE avro_date PARTITION(p1=2, p2='2014-09-26') SELECT * FROM avro_date_staging
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date_staging
-POSTHOOK: Output: default@avro_date@p1=2/p2=2014-09-26
-POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).d SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:d, type:date, comment:null), ]
-POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).l1 SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:l1, type:array<date>, comment:null), ]
-POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).m1 SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:m1, type:map<string,date>, comment:null), ]
-PREHOOK: query: SELECT * FROM avro_date
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_date
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-2012-02-21	{"bar":"1998-05-07","foo":"1980-12-16"}	["2011-09-04","2011-09-05"]	2	2014-09-26
-2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
-1947-02-11	{"baz":"1921-12-16"}	["2011-09-05"]	2	2014-09-26
-8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26
-PREHOOK: query: SELECT d, COUNT(d) FROM avro_date GROUP BY d
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT d, COUNT(d) FROM avro_date GROUP BY d
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-1947-02-11	1
-2012-02-21	1
-2014-02-11	1
-8200-02-11	1
-PREHOOK: query: SELECT * FROM avro_date WHERE d!='1947-02-11'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_date WHERE d!='1947-02-11'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-2012-02-21	{"bar":"1998-05-07","foo":"1980-12-16"}	["2011-09-04","2011-09-05"]	2	2014-09-26
-2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
-8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26
-PREHOOK: query: SELECT * FROM avro_date WHERE d<'2014-12-21'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_date WHERE d<'2014-12-21'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-2012-02-21	{"bar":"1998-05-07","foo":"1980-12-16"}	["2011-09-04","2011-09-05"]	2	2014-09-26
-2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
-1947-02-11	{"baz":"1921-12-16"}	["2011-09-05"]	2	2014-09-26
-PREHOOK: query: SELECT * FROM avro_date WHERE d>'8000-12-01'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_date
-PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_date WHERE d>'8000-12-01'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_date
-POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
-#### A masked pattern was here ####
-8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_date.q.out b/ql/src/test/results/clientpositive/avro_date.q.out
new file mode 100644
index 0000000..32501cf
--- /dev/null
+++ b/ql/src/test/results/clientpositive/avro_date.q.out
@@ -0,0 +1,126 @@
+PREHOOK: query: DROP TABLE avro_date_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE avro_date_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE avro_date
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE avro_date
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE avro_date_casts
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE avro_date_casts
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE avro_date_staging (d date, m1 map<string, date>, l1 array<date>)
+   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
+   STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_date_staging
+POSTHOOK: query: CREATE TABLE avro_date_staging (d date, m1 map<string, date>, l1 array<date>)
+   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
+   STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@avro_date_staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_date.txt' OVERWRITE INTO TABLE avro_date_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@avro_date_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_date.txt' OVERWRITE INTO TABLE avro_date_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@avro_date_staging
+PREHOOK: query: CREATE TABLE avro_date (d date, m1 map<string, date>, l1 array<date>) 
+  PARTITIONED BY (p1 int, p2 date) 
+  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
+  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' 
+  STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_date
+POSTHOOK: query: CREATE TABLE avro_date (d date, m1 map<string, date>, l1 array<date>) 
+  PARTITIONED BY (p1 int, p2 date) 
+  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' 
+  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':' 
+  STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@avro_date
+PREHOOK: query: INSERT OVERWRITE TABLE avro_date PARTITION(p1=2, p2='2014-09-26') SELECT * FROM avro_date_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_date_staging
+PREHOOK: Output: default@avro_date@p1=2/p2=2014-09-26
+POSTHOOK: query: INSERT OVERWRITE TABLE avro_date PARTITION(p1=2, p2='2014-09-26') SELECT * FROM avro_date_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_date_staging
+POSTHOOK: Output: default@avro_date@p1=2/p2=2014-09-26
+POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).d SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:d, type:date, comment:null), ]
+POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).l1 SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:l1, type:array<date>, comment:null), ]
+POSTHOOK: Lineage: avro_date PARTITION(p1=2,p2=2014-09-26).m1 SIMPLE [(avro_date_staging)avro_date_staging.FieldSchema(name:m1, type:map<string,date>, comment:null), ]
+PREHOOK: query: SELECT * FROM avro_date
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_date
+PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_date
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_date
+POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+2012-02-21	{"bar":"1998-05-07","foo":"1980-12-16"}	["2011-09-04","2011-09-05"]	2	2014-09-26
+2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
+1947-02-11	{"baz":"1921-12-16"}	["2011-09-05"]	2	2014-09-26
+8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26
+PREHOOK: query: SELECT d, COUNT(d) FROM avro_date GROUP BY d
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_date
+PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT d, COUNT(d) FROM avro_date GROUP BY d
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_date
+POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+1947-02-11	1
+2012-02-21	1
+2014-02-11	1
+8200-02-11	1
+PREHOOK: query: SELECT * FROM avro_date WHERE d!='1947-02-11'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_date
+PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_date WHERE d!='1947-02-11'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_date
+POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+2012-02-21	{"bar":"1998-05-07","foo":"1980-12-16"}	["2011-09-04","2011-09-05"]	2	2014-09-26
+2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
+8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26
+PREHOOK: query: SELECT * FROM avro_date WHERE d<'2014-12-21'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_date
+PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_date WHERE d<'2014-12-21'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_date
+POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+2012-02-21	{"bar":"1998-05-07","foo":"1980-12-16"}	["2011-09-04","2011-09-05"]	2	2014-09-26
+2014-02-11	{"baz":"1981-12-16"}	["2011-09-05"]	2	2014-09-26
+1947-02-11	{"baz":"1921-12-16"}	["2011-09-05"]	2	2014-09-26
+PREHOOK: query: SELECT * FROM avro_date WHERE d>'8000-12-01'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_date
+PREHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_date WHERE d>'8000-12-01'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_date
+POSTHOOK: Input: default@avro_date@p1=2/p2=2014-09-26
+#### A masked pattern was here ####
+8200-02-11	{"baz":"6981-12-16"}	["1039-09-05"]	2	2014-09-26

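For context on the avro_date change above: the deleted JDK-specific variants were identical apart from the -- JAVA_VERSION_SPECIFIC_OUTPUT marker (130 vs. 126 lines), because DATE rendering does not vary by JDK. Hive materializes Avro's "date" logical type, an int counting days since the epoch, through java.sql.Date, whose yyyy-MM-dd formatting is stable. A minimal sketch of that round trip, not Hive's actual serde code (class and variable names are illustrative; assumes a JDK 8+ runtime for the java.time API):

    import java.sql.Date;
    import java.time.LocalDate;

    public class AvroDateRoundTrip {
        public static void main(String[] args) {
            // Avro's "date" logical type is an int of days since 1970-01-01.
            int daysSinceEpoch = (int) LocalDate.parse("8200-02-11").toEpochDay();
            // Rehydrate and print; the yyyy-MM-dd rendering is identical
            // across JDKs, so one golden file suffices for this test.
            Date d = Date.valueOf(LocalDate.ofEpochDay(daysSinceEpoch));
            System.out.println(d);  // prints 8200-02-11
        }
    }
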
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.java1.7.out b/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.java1.7.out
deleted file mode 100644
index 8f8065e..0000000
--- a/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.java1.7.out
+++ /dev/null
@@ -1,57 +0,0 @@
-PREHOOK: query: -- These tests attempt to deserialize an Avro file that contains map null values, and the file schema
--- vs record schema have the null values in different positions
--- i.e.
--- fileSchema   = [{ "type" : "map", "values" : ["string","null"]}, "null"]
--- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}]
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- These tests attempt to deserialize an Avro file that contains map null values, and the file schema
--- vs record schema have the null values in different positions
--- i.e.
--- fileSchema   = [{ "type" : "map", "values" : ["string","null"]}, "null"]
--- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}]
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: SELECT * FROM avro_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_table
-#### A masked pattern was here ####
-{"key4":null,"key3":"val3"}
-{"key4":null,"key3":"val3"}
-{"key2":"val2","key1":null}
-{"key4":null,"key3":"val3"}
-{"key4":null,"key3":"val3"}
-PREHOOK: query: DROP TABLE avro_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: DROP TABLE avro_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: default@avro_table

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.java1.8.out b/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.java1.8.out
deleted file mode 100644
index 127d8b3..0000000
--- a/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.java1.8.out
+++ /dev/null
@@ -1,57 +0,0 @@
-PREHOOK: query: -- These tests attempt to deserialize an Avro file that contains map null values, and the file schema
--- vs record schema have the null values in different positions
--- i.e.
--- fileSchema   = [{ "type" : "map", "values" : ["string","null"]}, "null"]
--- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}]
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- These tests attempt to deserialize an Avro file that contains map null values, and the file schema
--- vs record schema have the null values in different positions
--- i.e.
--- fileSchema   = [{ "type" : "map", "values" : ["string","null"]}, "null"]
--- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}]
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE IF EXISTS avro_table
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_table
-PREHOOK: query: SELECT * FROM avro_table
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_table
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_table
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_table
-#### A masked pattern was here ####
-{"key3":"val3","key4":null}
-{"key3":"val3","key4":null}
-{"key1":null,"key2":"val2"}
-{"key3":"val3","key4":null}
-{"key3":"val3","key4":null}
-PREHOOK: query: DROP TABLE avro_table
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@avro_table
-PREHOOK: Output: default@avro_table
-POSTHOOK: query: DROP TABLE avro_table
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@avro_table
-POSTHOOK: Output: default@avro_table

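The java1.7 and java1.8 golden files just deleted differ only in map key order ({"key4":null,"key3":"val3"} versus {"key3":"val3","key4":null}): the map rows are printed by iterating a java.util.HashMap, and HashMap's unspecified iteration order changed when JDK 8 reworked its internal hashing, which is the entire reason two files existed. The unified file below standardizes on the JDK 8 ordering. A minimal sketch of the effect (assuming a plain HashMap as the container; the exact orderings are implementation-dependent):

    import java.util.HashMap;
    import java.util.Map;

    public class MapKeyOrder {
        public static void main(String[] args) {
            Map<String, String> m = new HashMap<>();
            m.put("key3", "val3");
            m.put("key4", null);
            // HashMap iteration order is unspecified and differs between
            // JDK 7 ({key4=null, key3=val3}) and JDK 8 ({key3=val3, key4=null}),
            // which is exactly the delta between the two deleted golden files.
            System.out.println(m);
        }
    }
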
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.out b/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.out
new file mode 100644
index 0000000..2d983f1
--- /dev/null
+++ b/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.out
@@ -0,0 +1,55 @@
+PREHOOK: query: -- These tests attempt to deserialize an Avro file that contains map null values, and the file schema
+-- vs record schema have the null values in different positions
+-- i.e.
+-- fileSchema   = [{ "type" : "map", "values" : ["string","null"]}, "null"]
+-- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}]
+
+
+DROP TABLE IF EXISTS avro_table
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- These tests attempt to deserialize an Avro file that contains map null values, and the file schema
+-- vs record schema have the null values in different positions
+-- i.e.
+-- fileSchema   = [{ "type" : "map", "values" : ["string","null"]}, "null"]
+-- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}]
+
+
+DROP TABLE IF EXISTS avro_table
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: CREATE TABLE avro_table (avreau_col_1 map<string,string>) STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@avro_table
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/map_null_val.avro' OVERWRITE INTO TABLE avro_table
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@avro_table
+PREHOOK: query: SELECT * FROM avro_table
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_table
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_table
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_table
+#### A masked pattern was here ####
+{"key3":"val3","key4":null}
+{"key3":"val3","key4":null}
+{"key1":null,"key2":"val2"}
+{"key3":"val3","key4":null}
+{"key3":"val3","key4":null}
+PREHOOK: query: DROP TABLE avro_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@avro_table
+PREHOOK: Output: default@avro_table
+POSTHOOK: query: DROP TABLE avro_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@avro_table
+POSTHOOK: Output: default@avro_table


[37/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
deleted file mode 100644
index 793b8be..0000000
--- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
+++ /dev/null
@@ -1,766 +0,0 @@
-Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: -- non agg, non corr
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain
-select key, count(*) 
-from src 
-group by key
-having key not in  
-  ( select key  from src s1 
-    where s1.key > '12'
-  )
-PREHOOK: type: QUERY
-POSTHOOK: query: -- non agg, non corr
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain
-select key, count(*) 
-from src 
-group by key
-having key not in  
-  ( select key  from src s1 
-    where s1.key > '12'
-  )
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: key
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: key (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: bigint)
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key > '12') (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col3 is null (type: boolean)
-            Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: bigint)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
-              Filter Operator
-                predicate: false (type: boolean)
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                    value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: -- non agg, corr
-explain
-select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
-  where min(p_retailprice) = l and r - l > 600
-  )
-PREHOOK: type: QUERY
-POSTHOOK: query: -- non agg, corr
-explain
-select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
-  where min(p_retailprice) = l and r - l > 600
-  )
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-5
-  Stage-3 depends on stages: Stage-2, Stage-6
-  Stage-4 is a root stage
-  Stage-5 depends on stages: Stage-4
-  Stage-6 is a root stage
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: p_mfgr, p_retailprice
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: min(p_retailprice)
-                keys: p_mfgr (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: double)
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: double)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: double)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: double)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: double)
-              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string), _col1 (type: double)
-            1 _col0 (type: string), _col1 (type: double)
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col3 is null (type: boolean)
-            Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: p_mfgr, p_retailprice
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: min(p_retailprice), max(p_retailprice)
-                keys: p_mfgr (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0), max(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (((_col2 - _col1) > 600.0) and (_col0 is null or _col1 is null)) (type: boolean)
-            Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: p_mfgr, p_retailprice
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: min(p_retailprice), max(p_retailprice)
-                keys: p_mfgr (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0), max(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: ((_col2 - _col1) > 600.0) (type: boolean)
-            Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
-  where min(p_retailprice) = l and r - l > 600
-  )
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern was here ####
-POSTHOOK: query: select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
-  where min(p_retailprice) = l and r - l > 600
-  )
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-Manufacturer#1	1173.15
-Manufacturer#2	1690.68
-Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: -- agg, non corr
-explain
-select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from part a
-  group by p_mfgr
-  having max(p_retailprice) - min(p_retailprice) > 600
-  )
-PREHOOK: type: QUERY
-POSTHOOK: query: -- agg, non corr
-explain
-select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from part a
-  group by p_mfgr
-  having max(p_retailprice) - min(p_retailprice) > 600
-  )
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-5
-  Stage-3 depends on stages: Stage-2, Stage-6
-  Stage-4 is a root stage
-  Stage-5 depends on stages: Stage-4
-  Stage-6 is a root stage
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: p_mfgr, p_retailprice
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: min(p_retailprice)
-                keys: p_mfgr (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: double)
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: double)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col3 is null (type: boolean)
-            Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: p_mfgr is null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_retailprice (type: double)
-                outputColumnNames: _col1
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: max(_col1), min(_col1)
-                  keys: null (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: double), _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: max(VALUE._col0), min(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col1 (type: double), _col2 (type: double)
-            outputColumnNames: _col1, _col2
-            Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((_col1 - _col2) > 600.0) (type: boolean)
-              Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: p_mfgr, p_retailprice
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: max(p_retailprice), min(p_retailprice)
-                keys: p_mfgr (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: max(VALUE._col0), min(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: ((_col1 - _col2) > 600.0) (type: boolean)
-            Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from part a
-  group by p_mfgr
-  having max(p_retailprice) - min(p_retailprice) > 600
-  )
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern was here ####
-POSTHOOK: query: select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from part a
-  group by p_mfgr
-  having max(p_retailprice) - min(p_retailprice) > 600
-  )
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-Manufacturer#1	1173.15
-Manufacturer#2	1690.68

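Both the plan just deleted and its java1.8 twin below encode the same NOT IN rewrite, visible in the stages above: one branch counts rows the subquery could emit as NULL and proceeds only when (_col0 = 0), since key NOT IN (S) must yield no rows at all if S contains a NULL; the cross-product join fans that guard across the aggregated rows, and a left outer join then keeps the rows with _col3 is null (no match in the subquery). A minimal sketch of those semantics in plain Java (notIn and the sample data are illustrative, not Hive's operator code):

    import java.util.*;
    import java.util.stream.*;

    public class NotInRewriteDemo {
        // (a) the NULL-count guard, (b) the left-outer-join-style anti-join.
        static List<String> notIn(List<String> outer, List<String> inner) {
            long nulls = inner.stream().filter(Objects::isNull).count();
            if (nulls > 0) return Collections.emptyList(); // NOT IN with NULLs: no rows
            Set<String> keys = new HashSet<>(inner);
            return outer.stream().filter(k -> !keys.contains(k))
                        .collect(Collectors.toList());
        }

        public static void main(String[] args) {
            System.out.println(notIn(Arrays.asList("11", "12", "13"),
                                     Arrays.asList("13", "14")));   // [11, 12]
            System.out.println(notIn(Arrays.asList("11", "12", "13"),
                                     Arrays.asList("13", null)));   // []
        }
    }
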
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.8.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.8.out
deleted file mode 100644
index 4e227cd..0000000
--- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.8.out
+++ /dev/null
@@ -1,762 +0,0 @@
-Warning: Shuffle Join JOIN[26][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: -- non agg, non corr
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain
-select key, count(*) 
-from src 
-group by key
-having key not in  
-  ( select key  from src s1 
-    where s1.key > '12'
-  )
-PREHOOK: type: QUERY
-POSTHOOK: query: -- non agg, non corr
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-explain
-select key, count(*) 
-from src 
-group by key
-having key not in  
-  ( select key  from src s1 
-    where s1.key > '12'
-  )
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2
-  Stage-4 is a root stage
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                keys: _col0 (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: bigint)
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: bigint)
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key > '12') (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col3 is null (type: boolean)
-            Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: bigint)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '12') and key is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: -- non agg, corr
-explain
-select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
-  where min(p_retailprice) = l and r - l > 600
-  )
-PREHOOK: type: QUERY
-POSTHOOK: query: -- non agg, corr
-explain
-select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
-  where min(p_retailprice) = l and r - l > 600
-  )
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-5
-  Stage-3 depends on stages: Stage-2, Stage-6
-  Stage-4 is a root stage
-  Stage-5 depends on stages: Stage-4
-  Stage-6 is a root stage
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: min(_col1)
-                keys: _col0 (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: double)
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: double)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: double)
-              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: double)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: double)
-              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string), _col1 (type: double)
-            1 _col0 (type: string), _col1 (type: double)
-          outputColumnNames: _col0, _col1, _col3
-          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col3 is null (type: boolean)
-            Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: min(_col1), max(_col1)
-                keys: _col0 (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0), max(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (((_col2 - _col1) > 600.0) and (_col0 is null or _col1 is null)) (type: boolean)
-            Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: min(_col1), max(_col1)
-                keys: _col0 (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0), max(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: ((_col2 - _col1) > 600.0) (type: boolean)
-            Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
-  where min(p_retailprice) = l and r - l > 600
-  )
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern was here ####
-POSTHOOK: query: select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
-  where min(p_retailprice) = l and r - l > 600
-  )
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-Manufacturer#1	1173.15
-Manufacturer#2	1690.68
-Warning: Shuffle Join JOIN[39][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product
-PREHOOK: query: -- agg, non corr
-explain
-select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from part a
-  group by p_mfgr
-  having max(p_retailprice) - min(p_retailprice) > 600
-  )
-PREHOOK: type: QUERY
-POSTHOOK: query: -- agg, non corr
-explain
-select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from part a
-  group by p_mfgr
-  having max(p_retailprice) - min(p_retailprice) > 600
-  )
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4
-  Stage-3 depends on stages: Stage-2, Stage-6
-  Stage-4 is a root stage
-  Stage-5 is a root stage
-  Stage-6 depends on stages: Stage-5
-  Stage-0 depends on stages: Stage-3
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: min(_col1)
-                keys: _col0 (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: min(VALUE._col0)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: double)
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col2 is null (type: boolean)
-            Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 7 Data size: 865 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: double)
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 7 Data size: 951 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 7 Data size: 951 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: max(_col1), min(_col1)
-                keys: _col0 (type: string)
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: max(VALUE._col0), min(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: ((_col1 - _col2) > 600.0) (type: boolean)
-            Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-5
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: p_mfgr is null (type: boolean)
-              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: null (type: string), p_retailprice (type: double)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: max(_col1), min(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: double), _col2 (type: double)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: max(VALUE._col0), min(VALUE._col1)
-          keys: KEY._col0 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col1 (type: double), _col2 (type: double)
-            outputColumnNames: _col1, _col2
-            Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((_col1 - _col2) > 600.0) (type: boolean)
-              Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[39][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product
-PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from part a
-  group by p_mfgr
-  having max(p_retailprice) - min(p_retailprice) > 600
-  )
-PREHOOK: type: QUERY
-PREHOOK: Input: default@part
-#### A masked pattern was here ####
-POSTHOOK: query: select b.p_mfgr, min(p_retailprice) 
-from part b 
-group by b.p_mfgr
-having b.p_mfgr not in 
-  (select p_mfgr 
-  from part a
-  group by p_mfgr
-  having max(p_retailprice) - min(p_retailprice) > 600
-  )
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@part
-#### A masked pattern was here ####
-Manufacturer#2	1690.68
-Manufacturer#1	1173.15
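
The plan above (removed here along with the other JDK-version-specific golden files) documents how Hive de-correlates NOT IN under HAVING: a left outer join against the subquery with an IS NULL filter (the anti-join), cross-joined with a guard branch that counts NULL keys, so that NOT IN yields no rows when the subquery produces a NULL. A minimal HiveQL sketch of that plan shape, using the same src table — an illustration of the rewrite, not the optimizer's literal output:

  SELECT g.key, g.cnt
  FROM (SELECT key, count(*) AS cnt FROM src GROUP BY key) g
  CROSS JOIN (SELECT count(*) AS c FROM src s1
              WHERE s1.key > '12' AND s1.key IS NULL) z
  LEFT OUTER JOIN (SELECT key FROM src s1 WHERE s1.key > '12') sq
    ON g.key = sq.key
  WHERE z.c = 0          -- NOT IN is empty if the subquery's key has a NULL
    AND sq.key IS NULL   -- anti-join: keep only groups with no match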


[22/66] [abbrv] hive git commit: HIVE-13794: HIVE_RPC_QUERY_PLAN should always be set when generating LLAP splits (Jason Dere, reviewed by Siddharth Seth)

Posted by sp...@apache.org.
HIVE-13794: HIVE_RPC_QUERY_PLAN should always be set when generating LLAP splits (Jason Dere, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2d103982
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2d103982
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2d103982

Branch: refs/heads/java8
Commit: 2d1039829a4a6b0978e423fbefabce22568ef8a2
Parents: 115d225
Author: Jason Dere <jd...@hortonworks.com>
Authored: Tue May 24 17:07:05 2016 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Tue May 24 17:07:05 2016 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java  | 3 ---
 .../apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java   | 2 ++
 2 files changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2d103982/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java
index 48b9493..5d0da30 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlap.java
@@ -103,9 +103,6 @@ public class TestJdbcWithMiniLlap {
 
     conf = new HiveConf();
     conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
-    // Necessary for GetSplits()/LlapInputFormat,
-    // the config generated for the query fragment needs to include the MapWork
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_RPC_QUERY_PLAN, true);
 
     conf.addResource(new URL("file://" + new File(confDir).toURI().getPath()
         + "/tez-site.xml"));

http://git-wip-us.apache.org/repos/asf/hive/blob/2d103982/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
index ce69ee6..83d492a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
@@ -205,6 +205,8 @@ public class GenericUDTFGetSplits extends GenericUDTF {
     HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_MODE, "llap");
     HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TEZ_GENERATE_CONSISTENT_SPLITS, true);
     HiveConf.setBoolVar(conf, HiveConf.ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS, true);
+    // Tez/LLAP requires RPC query plan
+    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_RPC_QUERY_PLAN, true);
 
     try {
       jc = DagUtils.getInstance().createConfiguration(conf);
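
With this change the RPC query plan flag is forced on inside GenericUDTFGetSplits itself, so callers of the splits UDTF (such as the MiniLlap test above) no longer need to set hive.rpc.query.plan before requesting LLAP splits. A minimal sketch of the call path, assuming the UDTF is invoked under its registered name get_splits and that 5 is just an illustrative split count:

  -- issued from any HiveQL client; no prior SET hive.rpc.query.plan=true needed
  -- assumption: get_splits(query, numSplits) is the registered form of GenericUDTFGetSplits
  SELECT get_splits('SELECT key, value FROM src', 5);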


[46/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
new file mode 100644
index 0000000..81f3af3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out
@@ -0,0 +1,811 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103:
+-- 99 000000_0
+-- 99 000001_0
+-- after merge
+-- 142 000000_0
+-- ds=2008-04-08/hr=11/key=484:
+-- 87 000000_0
+-- 87 000001_0
+-- after merge
+-- 118 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key) on ('484','103')
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103:
+-- 99 000000_0
+-- 99 000001_0
+-- after merge
+-- 142 000000_0
+-- ds=2008-04-08/hr=11/key=484:
+-- 87 000000_0
+-- 87 000001_0
+-- after merge
+-- 118 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key) on ('484','103')
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=11/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_static_part
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_static_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_static_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_static_part
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_static_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	6                   
+	numRows             	1000                
+	rawDataSize         	9624                
+	totalSize           	10898               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key]               	 
+Skewed Values:      	[[484], [103]]      	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=11/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_static_part
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_static_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_static_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              partition_columns.types string:string
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-5
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              partition_columns.types string:string
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_static_part
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_static_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	1000                
+	rawDataSize         	9624                
+	totalSize           	10786               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key]               	 
+Skewed Values:      	[[484], [103]]      	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select count(*) from list_bucketing_static_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from list_bucketing_static_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+1000
+PREHOOK: query: explain extended
+select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              numFiles 4
+              numRows 1000
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 9624
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 10786
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_static_part
+          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+484	val_484	2008-04-08	11
+484	val_484	2008-04-08	11
+PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+484	val_484	2008-04-08	11
+484	val_484	2008-04-08	12
+PREHOOK: query: -- clean up
+drop table list_bucketing_static_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- clean up
+drop table list_bucketing_static_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Output: default@list_bucketing_static_part
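
For orientation on the list_bucketing_static_part test above: the skew metadata reported by desc formatted (Skewed Columns: [key], Skewed Values: [[484], [103]], Stored As SubDirectories: Yes, RCFile storage) corresponds to a table declared roughly like the following hedged sketch; the actual CREATE TABLE runs earlier in the .q file and is not part of these hunks:

  CREATE TABLE list_bucketing_static_part (key STRING, value STRING)
  PARTITIONED BY (ds STRING, hr STRING)
  SKEWED BY (key) ON ('484', '103') STORED AS DIRECTORIES
  STORED AS RCFILE;

Because rows with key = '484' are written to their own subdirectory (see the Skewed Value to Truncated Path map above), the SELECT with key = '484' AND value = 'val_484' can in principle be answered from that single subdirectory rather than from the whole partition.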

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/llap/join0.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join0.q.java1.7.out b/ql/src/test/results/clientpositive/llap/join0.q.java1.7.out
deleted file mode 100644
index 5651839..0000000
--- a/ql/src/test/results/clientpositive/llap/join0.q.java1.7.out
+++ /dev/null
@@ -1,242 +0,0 @@
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-            Execution mode: llap
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Execution mode: uber
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	9	val_9
-0	val_0	9	val_9
-0	val_0	9	val_9
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	2	val_2
-2	val_2	4	val_4
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	8	val_8
-2	val_2	9	val_9
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	2	val_2
-4	val_4	4	val_4
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	8	val_8
-4	val_4	9	val_9
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	9	val_9
-5	val_5	9	val_9
-5	val_5	9	val_9
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	2	val_2
-8	val_8	4	val_4
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	8	val_8
-8	val_8	9	val_9
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	2	val_2
-9	val_9	4	val_4
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	8	val_8
-9	val_9	9	val_9

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/llap/join0.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join0.q.java1.8.out b/ql/src/test/results/clientpositive/llap/join0.q.java1.8.out
deleted file mode 100644
index 5651839..0000000
--- a/ql/src/test/results/clientpositive/llap/join0.q.java1.8.out
+++ /dev/null
@@ -1,242 +0,0 @@
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-            Execution mode: llap
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-            Execution mode: llap
-        Reducer 2 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Execution mode: uber
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	9	val_9
-0	val_0	9	val_9
-0	val_0	9	val_9
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	2	val_2
-2	val_2	4	val_4
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	8	val_8
-2	val_2	9	val_9
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	2	val_2
-4	val_4	4	val_4
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	8	val_8
-4	val_4	9	val_9
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	9	val_9
-5	val_5	9	val_9
-5	val_5	9	val_9
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	2	val_2
-8	val_8	4	val_4
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	8	val_8
-8	val_8	9	val_9
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	2	val_2
-9	val_9	4	val_4
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	8	val_8
-9	val_9	9	val_9

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/llap/join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/join0.q.out b/ql/src/test/results/clientpositive/llap/join0.q.out
new file mode 100644
index 0000000..f177afc
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/join0.q.out
@@ -0,0 +1,243 @@
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+            Execution mode: llap
+            LLAP IO: no inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 
+                  1 
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  sort order: ++++
+                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: EXPLAIN FORMATTED
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FORMATTED
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+#### A masked pattern was here ####
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	2	val_2
+0	val_0	2	val_2
+0	val_0	2	val_2
+0	val_0	4	val_4
+0	val_0	4	val_4
+0	val_0	4	val_4
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	8	val_8
+0	val_0	8	val_8
+0	val_0	8	val_8
+0	val_0	9	val_9
+0	val_0	9	val_9
+0	val_0	9	val_9
+2	val_2	0	val_0
+2	val_2	0	val_0
+2	val_2	0	val_0
+2	val_2	2	val_2
+2	val_2	4	val_4
+2	val_2	5	val_5
+2	val_2	5	val_5
+2	val_2	5	val_5
+2	val_2	8	val_8
+2	val_2	9	val_9
+4	val_4	0	val_0
+4	val_4	0	val_0
+4	val_4	0	val_0
+4	val_4	2	val_2
+4	val_4	4	val_4
+4	val_4	5	val_5
+4	val_4	5	val_5
+4	val_4	5	val_5
+4	val_4	8	val_8
+4	val_4	9	val_9
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	2	val_2
+5	val_5	2	val_2
+5	val_5	2	val_2
+5	val_5	4	val_4
+5	val_5	4	val_4
+5	val_5	4	val_4
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	8	val_8
+5	val_5	8	val_8
+5	val_5	8	val_8
+5	val_5	9	val_9
+5	val_5	9	val_9
+5	val_5	9	val_9
+8	val_8	0	val_0
+8	val_8	0	val_0
+8	val_8	0	val_0
+8	val_8	2	val_2
+8	val_8	4	val_4
+8	val_8	5	val_5
+8	val_8	5	val_5
+8	val_8	5	val_5
+8	val_8	8	val_8
+8	val_8	9	val_9
+9	val_9	0	val_0
+9	val_9	0	val_0
+9	val_9	0	val_0
+9	val_9	2	val_2
+9	val_9	4	val_4
+9	val_9	5	val_5
+9	val_9	5	val_5
+9	val_9	5	val_5
+9	val_9	8	val_8
+9	val_9	9	val_9
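
For orientation on the warning that brackets this test: the JOIN above carries no ON clause, so the planner rightly reports a cross product (every filtered src1 row pairs with every filtered src2 row). A keyed variant of the same query, illustrative only and not part of the test, would avoid the warning:

  SELECT src1.key AS k1, src1.value AS v1,
         src2.key AS k2, src2.value AS v2
  FROM (SELECT * FROM src WHERE src.key < 10) src1
  JOIN (SELECT * FROM src WHERE src.key < 10) src2
    ON (src1.key = src2.key)
  SORT BY k1, v1, k2, v2;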

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.java1.7.out
deleted file mode 100644
index 22b5d93..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.java1.7.out
+++ /dev/null
@@ -1,217 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: i (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: avg(50), avg(50.0), avg(50)
-                      keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
-            Execution mode: vectorized, llap
-        Reducer 2 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
-        Reducer 3 
-            Execution mode: vectorized, uber
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Limit
-                  Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.java1.8.out b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.java1.8.out
deleted file mode 100644
index 22b5d93..0000000
--- a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.java1.8.out
+++ /dev/null
@@ -1,217 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: i (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: avg(50), avg(50.0), avg(50)
-                      keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
-            Execution mode: vectorized, llap
-        Reducer 2 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
-        Reducer 3 
-            Execution mode: vectorized, uber
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Limit
-                  Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50


[07/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
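
The golden-file diffs below all trace back to the same change: once Calcite
(CBO) has optimized a plan, Hive's legacy ConstantPropagate pass no longer
reruns over it, so Calcite's simplifications survive into the final operator
tree. Two symptoms recur: predicates that Calcite folds (a CASE collapsing to
a plain equality, UDFToDouble(1) becoming 1.0) are kept in folded form, and
literals that the old pass used to substitute into join or group-by keys
("1 (type: int)", "-92 (type: tinyint)") now stay as column references such
as _col1 or _col2. A minimal sketch of the predicate shape involved, assuming
the standard srcpart test table (query abridged from ppd_udf_case.q;
illustrative, not the verbatim test):

set hive.cbo.enable=true;
EXPLAIN
SELECT *
FROM srcpart a JOIN srcpart b ON a.key = b.key
WHERE CASE WHEN a.key = '27' THEN true
           WHEN a.key = '38' THEN false
           ELSE null END;
-- Expected Filter predicate in the CBO plan, matching the diff below:
--   predicate: (key = '27') (type: boolean)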
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/ppd_udf_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_udf_case.q.out b/ql/src/test/results/clientpositive/ppd_udf_case.q.out
index 1c1c2a4..56941cd 100644
--- a/ql/src/test/results/clientpositive/ppd_udf_case.q.out
+++ b/ql/src/test/results/clientpositive/ppd_udf_case.q.out
@@ -1,3 +1,4 @@
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
 FROM srcpart a JOIN srcpart b
@@ -37,32 +38,28 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((ds = '2008-04-08') and CASE WHEN ((key = '27')) THEN (true) WHEN ((key = '38')) THEN (false) ELSE (null) END and key is not null) (type: boolean)
+              predicate: ((ds = '2008-04-08') and (key = '27')) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string), hr (type: string)
-                outputColumnNames: _col0, _col1, _col3
+                expressions: value (type: string), hr (type: string)
+                outputColumnNames: _col1, _col3
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
+                  sort order: 
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string), _col3 (type: string)
           TableScan
             alias: a
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((ds = '2008-04-08') and CASE WHEN ((key = '27')) THEN (true) WHEN ((key = '38')) THEN (false) ELSE (null) END and key is not null) (type: boolean)
+              predicate: ((ds = '2008-04-08') and (key = '27')) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string), hr (type: string)
-                outputColumnNames: _col0, _col1, _col3
+                expressions: value (type: string), hr (type: string)
+                outputColumnNames: _col1, _col3
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
+                  sort order: 
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string), _col3 (type: string)
       Reduce Operator Tree:
@@ -70,9 +67,9 @@ STAGE PLANS:
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col7
+            0 
+            1 
+          outputColumnNames: _col1, _col3, _col5, _col7
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -86,12 +83,12 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string), '2008-04-08' (type: string), _col7 (type: string)
+              key expressions: '27' (type: string), _col1 (type: string), '2008-04-08' (type: string), _col3 (type: string), '27' (type: string), _col5 (type: string), '2008-04-08' (type: string), _col7 (type: string)
               sort order: ++++++++
               Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), '2008-04-08' (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string), '2008-04-08' (type: string), KEY.reducesinkkey7 (type: string)
+          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey5 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey7 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -108,6 +105,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
 FROM srcpart a JOIN srcpart b
 ON a.key = b.key
@@ -144,6 +142,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 27	val_27	2008-04-08	11	27	val_27	2008-04-08	12
 27	val_27	2008-04-08	12	27	val_27	2008-04-08	11
 27	val_27	2008-04-08	12	27	val_27	2008-04-08	12
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT *
 FROM srcpart a JOIN srcpart b
@@ -183,32 +182,28 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (CASE WHEN ((key = '27')) THEN (true) WHEN ((key = '38')) THEN (false) ELSE (null) END and key is not null) (type: boolean)
+              predicate: (key = '27') (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string), hr (type: string)
-                outputColumnNames: _col0, _col1, _col3
+                expressions: value (type: string), hr (type: string)
+                outputColumnNames: _col1, _col3
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
+                  sort order: 
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string), _col3 (type: string)
           TableScan
             alias: a
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (CASE WHEN ((key = '27')) THEN (true) WHEN ((key = '38')) THEN (false) ELSE (null) END and key is not null) (type: boolean)
+              predicate: (key = '27') (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string), hr (type: string)
-                outputColumnNames: _col0, _col1, _col3
+                expressions: value (type: string), hr (type: string)
+                outputColumnNames: _col1, _col3
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
+                  sort order: 
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string), _col3 (type: string)
       Reduce Operator Tree:
@@ -216,9 +211,9 @@ STAGE PLANS:
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col7
+            0 
+            1 
+          outputColumnNames: _col1, _col3, _col5, _col7
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -232,12 +227,12 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string), '2008-04-08' (type: string), _col7 (type: string)
+              key expressions: '27' (type: string), _col1 (type: string), '2008-04-08' (type: string), _col3 (type: string), '27' (type: string), _col5 (type: string), '2008-04-08' (type: string), _col7 (type: string)
               sort order: ++++++++
               Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), '2008-04-08' (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string), '2008-04-08' (type: string), KEY.reducesinkkey7 (type: string)
+          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey5 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey7 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -254,6 +249,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT *
 FROM srcpart a JOIN srcpart b
 ON a.key = b.key

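A note on the new cross-product warnings in this file: Calcite folds the
CASE predicate to a.key = '27', infers b.key = '27' through the equi-join
condition a.key = b.key, and once both join keys are the same constant the
key list is dropped, leaving a cross product of the two filtered 500-row
inputs. A hedged sketch of the inference (a reconstruction, not the
optimizer's actual rewrite trace):

-- a.key = b.key
--   AND CASE WHEN a.key = '27' THEN true WHEN a.key = '38' THEN false END
-- ==> a.key = '27' AND b.key = '27'   (constant propagated across the join)
-- ==> both key lists ("keys: 0 / 1") are empty, hence the
--     "Shuffle Join ... is a cross product" warning.
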
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/ppd_union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_union_view.q.out b/ql/src/test/results/clientpositive/ppd_union_view.q.out
index 435b6f9..3081e28 100644
--- a/ql/src/test/results/clientpositive/ppd_union_view.q.out
+++ b/ql/src/test/results/clientpositive/ppd_union_view.q.out
@@ -156,14 +156,14 @@ STAGE PLANS:
               predicate: keymap is not null (type: boolean)
               Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: keymap (type: string), value (type: string), ds (type: string)
-                outputColumnNames: _col0, _col1, _col2
+                expressions: keymap (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col2 (type: string)
-                  null sort order: aa
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col2 (type: string)
+                  key expressions: _col0 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
@@ -177,14 +177,14 @@ STAGE PLANS:
               predicate: keymap is not null (type: boolean)
               Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), keymap (type: string), ds (type: string)
-                outputColumnNames: _col0, _col1, _col2
+                expressions: key (type: string), keymap (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col1 (type: string), _col2 (type: string)
-                  null sort order: aa
-                  sort order: ++
-                  Map-reduce partition columns: _col1 (type: string), _col2 (type: string)
+                  key expressions: _col1 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: string)
                   Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   tag: 1
                   value expressions: _col0 (type: string)
@@ -291,8 +291,8 @@ STAGE PLANS:
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: string), _col2 (type: string)
-            1 _col1 (type: string), _col2 (type: string)
+            0 _col0 (type: string)
+            1 _col1 (type: string)
           outputColumnNames: _col1, _col3
           Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE
           Select Operator
@@ -488,10 +488,10 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), '2011-10-15' (type: string)
-                  null sort order: aa
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), '2011-10-15' (type: string)
+                  key expressions: _col0 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   tag: 0
                   value expressions: _col1 (type: string)
@@ -509,10 +509,10 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col1 (type: string), '2011-10-15' (type: string)
-                  null sort order: aa
-                  sort order: ++
-                  Map-reduce partition columns: _col1 (type: string), '2011-10-15' (type: string)
+                  key expressions: _col1 (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: string)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   tag: 1
                   value expressions: _col0 (type: string)
@@ -523,8 +523,8 @@ STAGE PLANS:
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col0 (type: string), _col2 (type: string)
-            1 _col1 (type: string), _col2 (type: string)
+            0 _col0 (type: string)
+            1 _col1 (type: string)
           outputColumnNames: _col1, _col3
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/quotedid_basic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/quotedid_basic.q.out b/ql/src/test/results/clientpositive/quotedid_basic.q.out
index 76bd883..4f79a46 100644
--- a/ql/src/test/results/clientpositive/quotedid_basic.q.out
+++ b/ql/src/test/results/clientpositive/quotedid_basic.q.out
@@ -102,23 +102,23 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: x+1 (type: string), y&y (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: x+1, y&y
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string), '1' (type: string)
+                  keys: x+1 (type: string), y&y (type: string)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string), '1' (type: string)
-                    sort order: +++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string), '1' (type: string)
+                    key expressions: _col0 (type: string), _col1 (type: string)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), '1' (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), '1' (type: string)
@@ -161,34 +161,30 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: x+1 (type: string), y&y (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: x+1, y&y
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string), '1' (type: string)
+                  keys: x+1 (type: string), y&y (type: string)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string), '1' (type: string)
-                    sort order: +++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string), '1' (type: string)
+                    key expressions: _col0 (type: string), _col1 (type: string)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), '1' (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -202,20 +198,20 @@ STAGE PLANS:
               value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), '1' (type: string)
-          outputColumnNames: _col0, _col1, _col2
+          expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
                   input alias: ptf_0
-                  output shape: _col0: string, _col1: string, _col2: string
+                  output shape: _col0: string, _col1: string
                   type: WINDOWING
                 Windowing table definition
                   input alias: ptf_1
                   name: windowingtablefunction
                   order by: _col1 ASC NULLS FIRST
-                  partition by: _col2
+                  partition by: '1'
                   raw input shape:
                   window functions:
                       window function definition
@@ -227,7 +223,7 @@ STAGE PLANS:
                         isPivotResult: true
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), rank_window_0 (type: int)
+              expressions: _col0 (type: string), _col1 (type: string), '1' (type: string), rank_window_0 (type: int)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               File Output Operator
@@ -269,34 +265,30 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: x+1 (type: string), y&y (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: x+1, y&y
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string), '1' (type: string)
+                  keys: x+1 (type: string), y&y (type: string)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string), '1' (type: string)
-                    sort order: +++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string), '1' (type: string)
+                    key expressions: _col0 (type: string), _col1 (type: string)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: string), '1' (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: string)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -310,20 +302,20 @@ STAGE PLANS:
               value expressions: _col0 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), '1' (type: string)
-          outputColumnNames: _col0, _col1, _col2
+          expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string)
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
                   input alias: ptf_0
-                  output shape: _col0: string, _col1: string, _col2: string
+                  output shape: _col0: string, _col1: string
                   type: WINDOWING
                 Windowing table definition
                   input alias: ptf_1
                   name: windowingtablefunction
                   order by: _col1 ASC NULLS FIRST
-                  partition by: _col2
+                  partition by: '1'
                   raw input shape:
                   window functions:
                       window function definition
@@ -335,7 +327,7 @@ STAGE PLANS:
                         isPivotResult: true
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), rank_window_0 (type: int)
+              expressions: _col0 (type: string), _col1 (type: string), '1' (type: string), rank_window_0 (type: int)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/quotedid_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/quotedid_partition.q.out b/ql/src/test/results/clientpositive/quotedid_partition.q.out
index 66cff2a..fe4c5db 100644
--- a/ql/src/test/results/clientpositive/quotedid_partition.q.out
+++ b/ql/src/test/results/clientpositive/quotedid_partition.q.out
@@ -47,26 +47,26 @@ STAGE PLANS:
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: y&y (type: string)
-                outputColumnNames: _col1
+                outputColumnNames: y&y
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  keys: '10' (type: string), _col1 (type: string), 'a' (type: string)
+                  keys: y&y (type: string)
                   mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                  outputColumnNames: _col0
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: '10' (type: string), _col1 (type: string), 'a' (type: string)
-                    sort order: +++
-                    Map-reduce partition columns: '10' (type: string), _col1 (type: string), 'a' (type: string)
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: '10' (type: string), KEY._col1 (type: string), 'a' (type: string)
+          keys: KEY._col0 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
+          outputColumnNames: _col0
           Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: '10' (type: string), _col1 (type: string), 'a' (type: string)
+            expressions: '10' (type: string), _col0 (type: string), 'a' (type: string)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
index a2a06b2..eabf9d9 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
@@ -65,12 +65,12 @@ STAGE PLANS:
           GatherStats: false
           Filter Operator
             isSamplingPred: false
-            predicate: ((rand(1) < 0.1) and ((UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0))) (type: boolean)
-            Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+            predicate: ((rand(1) < 0.1) and (not ((UDFToDouble(key) > 50.0) or (UDFToDouble(key) < 10.0)))) (type: boolean)
+            Statistics: Num rows: 56 Data size: 594 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 56 Data size: 594 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2'
@@ -153,12 +153,12 @@ STAGE PLANS:
           GatherStats: false
           Filter Operator
             isSamplingPred: false
-            predicate: ((UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+            predicate: (not ((UDFToDouble(key) > 50.0) or (UDFToDouble(key) < 10.0))) (type: boolean)
+            Statistics: Num rows: 168 Data size: 1784 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string), '2008-04-08' (type: string), hr (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 168 Data size: 1784 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: select a.* from srcpart a where a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2'

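The rand_partitionpruner3.q change is presentation plus statistics only: the
old plan rewrote the filter into an explicit range conjunction, while the
CBO-kept form retains the query's negated disjunction. The two are equivalent
by De Morgan, so the same rows qualify; only the row estimate differs,
presumably because the stats annotator scores an AND of comparisons more
tightly than a NOT over an OR:

-- not ((UDFToDouble(key) > 50.0) or (UDFToDouble(key) < 10.0))
--   <=> (UDFToDouble(key) <= 50.0) and (UDFToDouble(key) >= 10.0)
-- Same qualifying rows; the estimates move from 18/55 rows to 56/168 rows.
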
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/recursive_dir.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/recursive_dir.q.out b/ql/src/test/results/clientpositive/recursive_dir.q.out
index 599b255..8789a2d 100644
--- a/ql/src/test/results/clientpositive/recursive_dir.q.out
+++ b/ql/src/test/results/clientpositive/recursive_dir.q.out
@@ -32,7 +32,7 @@ SELECT key+11 FROM src WHERE key=484
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@fact_tz@ds=1/hr=1
-POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).x EXPRESSION []
 PREHOOK: query: ALTER TABLE fact_daily SET TBLPROPERTIES('EXTERNAL'='TRUE')
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@fact_daily

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/semijoin4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/semijoin4.q.out b/ql/src/test/results/clientpositive/semijoin4.q.out
index 015dad1..e557be1 100644
--- a/ql/src/test/results/clientpositive/semijoin4.q.out
+++ b/ql/src/test/results/clientpositive/semijoin4.q.out
@@ -69,32 +69,32 @@ STAGE PLANS:
             alias: t1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((UDFToInteger(tinyint_col_46) = -92) and decimal1309_col_65 is not null and bigint_col_13 is not null and UDFToInteger(tinyint_col_46) is not null) (type: boolean)
+              predicate: ((UDFToInteger(tinyint_col_46) = -92) and decimal1309_col_65 is not null and bigint_col_13 is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: bigint_col_13 (type: bigint), smallint_col_24 (type: smallint), double_col_60 (type: double), decimal1309_col_65 (type: decimal(13,9))
-                outputColumnNames: _col0, _col1, _col3, _col4
+                expressions: bigint_col_13 (type: bigint), smallint_col_24 (type: smallint), tinyint_col_46 (type: tinyint), double_col_60 (type: double), decimal1309_col_65 (type: decimal(13,9))
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: bigint), _col4 (type: decimal(27,9)), -92 (type: tinyint)
+                  key expressions: _col0 (type: bigint), _col4 (type: decimal(27,9)), _col2 (type: tinyint)
                   sort order: +++
-                  Map-reduce partition columns: _col0 (type: bigint), _col4 (type: decimal(27,9)), -92 (type: tinyint)
+                  Map-reduce partition columns: _col0 (type: bigint), _col4 (type: decimal(27,9)), _col2 (type: tinyint)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col1 (type: smallint), _col3 (type: double)
           TableScan
             alias: t2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((UDFToInteger(tinyint_col_21) = -92) and tinyint_col_18 is not null and decimal2709_col_9 is not null and UDFToInteger(tinyint_col_21) is not null) (type: boolean)
+              predicate: ((UDFToInteger(tinyint_col_21) = -92) and tinyint_col_18 is not null and decimal2709_col_9 is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: decimal2709_col_9 (type: decimal(27,9)), tinyint_col_18 (type: tinyint)
-                outputColumnNames: _col0, _col1
+                expressions: decimal2709_col_9 (type: decimal(27,9)), tinyint_col_18 (type: tinyint), tinyint_col_21 (type: tinyint)
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: UDFToLong(_col1) (type: bigint), _col0 (type: decimal(27,9)), -92 (type: tinyint)
+                  key expressions: UDFToLong(_col1) (type: bigint), _col0 (type: decimal(27,9)), _col2 (type: tinyint)
                   sort order: +++
-                  Map-reduce partition columns: UDFToLong(_col1) (type: bigint), _col0 (type: decimal(27,9)), -92 (type: tinyint)
+                  Map-reduce partition columns: UDFToLong(_col1) (type: bigint), _col0 (type: decimal(27,9)), _col2 (type: tinyint)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Join Operator
@@ -103,11 +103,11 @@ STAGE PLANS:
           keys:
             0 _col0 (type: bigint), _col4 (type: decimal(27,9)), _col2 (type: tinyint)
             1 UDFToLong(_col1) (type: bigint), _col0 (type: decimal(27,9)), _col2 (type: tinyint)
-          outputColumnNames: _col1, _col3
+          outputColumnNames: _col1, _col3, _col7
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col1 (type: smallint), _col3 (type: double)
-            outputColumnNames: _col0, _col1
+            expressions: _col1 (type: smallint), _col3 (type: double), _col7 (type: tinyint), UDFToInteger(_col7) (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
               compressed: false
@@ -121,16 +121,16 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: -92 (type: int)
+              key expressions: _col3 (type: int)
               sort order: +
-              Map-reduce partition columns: -92 (type: int)
+              Map-reduce partition columns: _col3 (type: int)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: _col0 (type: smallint), _col1 (type: double)
+              value expressions: _col0 (type: smallint), _col1 (type: double), _col2 (type: tinyint)
           TableScan
             Reduce Output Operator
-              key expressions: -92 (type: int)
+              key expressions: _col0 (type: int)
               sort order: +
-              Map-reduce partition columns: -92 (type: int)
+              Map-reduce partition columns: _col0 (type: int)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Join Operator
@@ -139,7 +139,7 @@ STAGE PLANS:
           keys:
             0 _col3 (type: int)
             1 _col0 (type: int)
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
@@ -153,14 +153,14 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: (-92 + _col0) (type: smallint), floor(_col1) (type: bigint)
+              key expressions: (UDFToShort(_col2) + _col0) (type: smallint), floor(_col1) (type: bigint)
               sort order: +-
-              Map-reduce partition columns: (-92 + _col0) (type: smallint)
+              Map-reduce partition columns: (UDFToShort(_col2) + _col0) (type: smallint)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: _col0 (type: smallint), _col1 (type: double)
+              value expressions: _col0 (type: smallint), _col1 (type: double), _col2 (type: tinyint)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: double), -92 (type: tinyint)
+          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: double), VALUE._col2 (type: tinyint)
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           PTF Operator
@@ -237,17 +237,21 @@ STAGE PLANS:
             0 _col0 (type: decimal(19,11)), _col1 (type: timestamp)
             1 _col0 (type: decimal(19,11)), _col1 (type: timestamp)
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Group By Operator
-            keys: -92 (type: int)
-            mode: hash
+          Select Operator
+            expressions: -92 (type: int)
             outputColumnNames: _col0
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            Group By Operator
+              keys: _col0 (type: int)
+              mode: hash
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/semijoin5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/semijoin5.q.out b/ql/src/test/results/clientpositive/semijoin5.q.out
index 70d705a..6ecc693 100644
--- a/ql/src/test/results/clientpositive/semijoin5.q.out
+++ b/ql/src/test/results/clientpositive/semijoin5.q.out
@@ -78,18 +78,18 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((UDFToInteger(smallint_col_19) = -92) and tinyint_col_20 is not null and decimal2709_col_9 is not null and tinyint_col_15 is not null and UDFToInteger(smallint_col_19) is not null) (type: boolean)
+              predicate: ((UDFToInteger(smallint_col_19) = -92) and tinyint_col_20 is not null and decimal2709_col_9 is not null and tinyint_col_15 is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: decimal2709_col_9 (type: decimal(27,9)), int_col_10 (type: int), tinyint_col_15 (type: tinyint), tinyint_col_20 (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2, _col4
+                expressions: decimal2709_col_9 (type: decimal(27,9)), int_col_10 (type: int), tinyint_col_15 (type: tinyint), smallint_col_19 (type: smallint), tinyint_col_20 (type: tinyint)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: UDFToLong(_col2) (type: bigint), _col0 (type: decimal(34,16)), _col4 (type: tinyint)
                   sort order: +++
                   Map-reduce partition columns: UDFToLong(_col2) (type: bigint), _col0 (type: decimal(34,16)), _col4 (type: tinyint)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  value expressions: _col1 (type: int)
+                  value expressions: _col1 (type: int), _col3 (type: smallint)
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -97,11 +97,11 @@ STAGE PLANS:
           keys:
             0 _col1 (type: bigint), _col4 (type: decimal(34,16)), _col0 (type: tinyint)
             1 UDFToLong(_col2) (type: bigint), _col0 (type: decimal(34,16)), _col4 (type: tinyint)
-          outputColumnNames: _col2, _col3, _col5, _col7
+          outputColumnNames: _col2, _col3, _col5, _col7, _col9
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col2 (type: timestamp), _col3 (type: double), _col5 (type: smallint), _col7 (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col3
+            expressions: _col2 (type: timestamp), _col3 (type: double), _col5 (type: smallint), _col7 (type: int), UDFToInteger(_col9) (type: int)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
               compressed: false
@@ -115,16 +115,16 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: timestamp), -92 (type: int)
+              key expressions: _col0 (type: timestamp), _col4 (type: int)
               sort order: ++
-              Map-reduce partition columns: _col0 (type: timestamp), -92 (type: int)
+              Map-reduce partition columns: _col0 (type: timestamp), _col4 (type: int)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               value expressions: _col1 (type: double), _col2 (type: smallint), _col3 (type: int)
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: timestamp), -92 (type: int)
+              key expressions: _col0 (type: timestamp), _col1 (type: int)
               sort order: ++
-              Map-reduce partition columns: _col0 (type: timestamp), -92 (type: int)
+              Map-reduce partition columns: _col0 (type: timestamp), _col1 (type: int)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Join Operator
@@ -280,11 +280,11 @@ STAGE PLANS:
           outputColumnNames: _col2
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col2 (type: timestamp)
-            outputColumnNames: _col0
+            expressions: _col2 (type: timestamp), -92 (type: int)
+            outputColumnNames: _col0, _col1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Group By Operator
-              keys: _col0 (type: timestamp), -92 (type: int)
+              keys: _col0 (type: timestamp), _col1 (type: int)
               mode: hash
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/skewjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoin.q.out b/ql/src/test/results/clientpositive/skewjoin.q.out
index bd954ef..9f07a10 100644
--- a/ql/src/test/results/clientpositive/skewjoin.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin.q.out
@@ -790,9 +790,9 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+                  key expressions: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
                   sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+                  Map-reduce partition columns: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
@@ -802,7 +802,7 @@ STAGE PLANS:
           handleSkewJoin: true
           keys:
             0 _col0 (type: string), UDFToDouble(substring(_col1, 5)) (type: double)
-            1 _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+            1 _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
           outputColumnNames: _col2, _col3
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Select Operator

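The skewjoin.q hunk is pure literal folding: CAST(1 AS DOUBLE), printed as
UDFToDouble(1), is evaluated at plan time to the literal 1.0, so the join-key
expression is semantically unchanged. A one-line check (illustrative):

SELECT CAST(1 AS DOUBLE);
-- returns 1.0, the literal now shown in
--   (UDFToDouble(substring(_col1, 5)) + 1.0)
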
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
index b0db59e..29c6d31 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out
@@ -46,8 +46,9 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket_3
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[20][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-4:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[18][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-4:MAPRED' is a cross product
 PREHOOK: query: explain 
 select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5
 PREHOOK: type: QUERY
@@ -106,23 +107,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: 5 (type: int)
-              sort order: +
-              Map-reduce partition columns: 5 (type: int)
+              sort order: 
               Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE
           TableScan
             Reduce Output Operator
-              key expressions: 5 (type: int)
-              sort order: +
-              Map-reduce partition columns: 5 (type: int)
+              sort order: 
               Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 5 (type: int)
-            1 5 (type: int)
+            0 
+            1 
           Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 5 (type: int), 5 (type: int)
@@ -182,8 +179,11 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[37][bigTable=?] in task 'Stage-9:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[38][bigTable=?] in task 'Stage-10:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Stage-9:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[36][bigTable=?] in task 'Stage-10:MAPRED' is a cross product
 PREHOOK: query: -- explain
 -- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value
 
@@ -276,8 +276,8 @@ STAGE PLANS:
           TableScan
             HashTable Sink Operator
               keys:
-                0 5 (type: int)
-                1 5 (type: int)
+                0 
+                1 
 
   Stage: Stage-6
     Map Reduce
@@ -287,8 +287,8 @@ STAGE PLANS:
               condition map:
                    Inner Join 0 to 1
               keys:
-                0 5 (type: int)
-                1 5 (type: int)
+                0 
+                1 
               Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 5 (type: int), 5 (type: int)
@@ -315,8 +315,8 @@ STAGE PLANS:
           TableScan
             HashTable Sink Operator
               keys:
-                0 5 (type: int)
-                1 5 (type: int)
+                0 
+                1 
 
   Stage: Stage-7
     Map Reduce
@@ -326,8 +326,8 @@ STAGE PLANS:
               condition map:
                    Inner Join 0 to 1
               keys:
-                0 5 (type: int)
-                1 5 (type: int)
+                0 
+                1 
               Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 5 (type: int), 5 (type: int)
@@ -348,23 +348,19 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: 5 (type: int)
-              sort order: +
-              Map-reduce partition columns: 5 (type: int)
+              sort order: 
               Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE
           TableScan
             Reduce Output Operator
-              key expressions: 5 (type: int)
-              sort order: +
-              Map-reduce partition columns: 5 (type: int)
+              sort order: 
               Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 5 (type: int)
-            1 5 (type: int)
+            0 
+            1 
           Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 5 (type: int), 5 (type: int)
@@ -432,8 +428,11 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[37][bigTable=?] in task 'Stage-9:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[38][bigTable=?] in task 'Stage-10:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Stage-9:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[36][bigTable=?] in task 'Stage-10:MAPRED' is a cross product
 PREHOOK: query: select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@smb_bucket_1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join8.q.out b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
index a769f4c..5427a0a 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
@@ -157,7 +157,7 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
 POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION []
 POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/auto_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join_filters.q.out b/ql/src/test/results/clientpositive/spark/auto_join_filters.q.out
index 84810d5..85b2a32 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join_filters.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join_filters.q.out
@@ -14,7 +14,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE my
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@myinput1
-Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -300,7 +300,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table sm
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_input2
-Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/auto_join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join_nulls.q.out b/ql/src/test/results/clientpositive/spark/auto_join_nulls.q.out
index 15f4791..32a885b 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join_nulls.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join_nulls.q.out
@@ -14,7 +14,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE my
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@myinput1
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -24,7 +24,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 13630578
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
index eff3671..e8ba2d1 100644
--- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out
@@ -138,7 +138,7 @@ POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3out
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@bucket_medium@ds=2008-04-08
-Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
@@ -563,7 +563,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket_big

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out b/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out
index f164f9d..518adca 100644
--- a/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out
@@ -22,7 +22,7 @@ POSTHOOK: query: CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM (
 SELECT tmp1.name as name FROM (
   SELECT name, 'MMM' AS n FROM T1) tmp1 

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out b/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out
index 85387a7..de829e2 100644
--- a/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out
@@ -434,7 +434,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((dimid = 100) = true) and (dimid <> 100) and (dimid = 100) is not null) (type: boolean)
+                    predicate: (((dimid = 100) = true) and (dimid <> 100)) (type: boolean)
                     Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -452,10 +452,10 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((id = 100) = true) and (id <> 100) and (id = 100) is not null) (type: boolean)
+                    predicate: (((id = 100) = true) and (id <> 100)) (type: boolean)
                     Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: id (type: int), (id = 100) (type: boolean)
+                      expressions: id (type: int), true (type: boolean)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
@@ -523,7 +523,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((dimid) IN (100, 200) and ((dimid = 100) = true) and (dimid = 100) is not null) (type: boolean)
+                    predicate: ((dimid) IN (100, 200) and ((dimid = 100) = true)) (type: boolean)
                     Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -541,10 +541,10 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((id) IN (100, 200) and ((id = 100) = true) and (id = 100) is not null) (type: boolean)
+                    predicate: ((id) IN (100, 200) and ((id = 100) = true)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: id (type: int), (id = 100) (type: boolean)
+                      expressions: id (type: int), true (type: boolean)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
@@ -614,7 +614,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((dimid = 100) = true) and (dimid = 200) and (dimid = 100) is not null) (type: boolean)
+                    predicate: (((dimid = 100) = true) and (dimid = 200)) (type: boolean)
                     Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string)
@@ -632,19 +632,21 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((id = 100) = true) and (id = 200) and (id = 100) is not null) (type: boolean)
+                    predicate: (((id = 100) = true) and (id = 200)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
+                      expressions: 200 (type: int), true (type: boolean)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        keys: 200 (type: int), false (type: boolean)
+                        keys: _col0 (type: int), _col1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: 200 (type: int), false (type: boolean)
+                          key expressions: _col0 (type: int), _col1 (type: boolean)
                           sort order: ++
-                          Map-reduce partition columns: 200 (type: int), false (type: boolean)
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: boolean)
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
@@ -652,7 +654,7 @@ STAGE PLANS:
                 condition map:
                      Left Semi Join 0 to 1
                 keys:
-                  0 _col3 (type: int), true (type: boolean)
+                  0 200 (type: int), true (type: boolean)
                   1 _col0 (type: int), _col1 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 2 Data size: 44 Basic stats: COMPLETE Column stats: NONE
@@ -701,7 +703,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((dimid = 100) = true) and (dimid = 100) and (dimid = 100) is not null) (type: boolean)
+                    predicate: (((dimid = 100) = true) and (dimid = 100)) (type: boolean)
                     Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string)
@@ -719,19 +721,21 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((id = 100) = true) and (id = 100) and (id = 100) is not null) (type: boolean)
+                    predicate: (((id = 100) = true) and (id = 100)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
+                      expressions: 100 (type: int), true (type: boolean)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        keys: 100 (type: int), true (type: boolean)
+                        keys: _col0 (type: int), _col1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: 100 (type: int), true (type: boolean)
+                          key expressions: _col0 (type: int), _col1 (type: boolean)
                           sort order: ++
-                          Map-reduce partition columns: 100 (type: int), true (type: boolean)
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: boolean)
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
@@ -739,7 +743,7 @@ STAGE PLANS:
                 condition map:
                      Left Semi Join 0 to 1
                 keys:
-                  0 _col3 (type: int), true (type: boolean)
+                  0 100 (type: int), true (type: boolean)
                   1 _col0 (type: int), _col1 (type: boolean)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 2 Data size: 44 Basic stats: COMPLETE Column stats: NONE
@@ -790,7 +794,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((dimid = 100) = true) and dimid is not null and (dimid = 100) is not null) (type: boolean)
+                    predicate: (((dimid = 100) = true) and dimid is not null) (type: boolean)
                     Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -808,10 +812,10 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((id = 100) = true) and id is not null and (id = 100) is not null) (type: boolean)
+                    predicate: (((id = 100) = true) and id is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: id (type: int), (id = 100) (type: boolean)
+                      expressions: id (type: int), true (type: boolean)
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/cross_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cross_join.q.out b/ql/src/test/results/clientpositive/spark/cross_join.q.out
index 2740c18..6b9a4be 100644
--- a/ql/src/test/results/clientpositive/spark/cross_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/cross_join.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: -- current
 explain select src.key from src join src src2
 PREHOOK: type: QUERY
@@ -63,7 +63,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: -- ansi cross join
 explain select src.key from src cross join src src2
 PREHOOK: type: QUERY
@@ -203,7 +203,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select src.key from src join src src2
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select src.key from src join src src2
@@ -271,7 +271,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select src.key from src cross join src src2
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select src.key from src cross join src src2

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out
index 65f0c22..9a965dd 100644
--- a/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out
@@ -32,7 +32,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
 POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join B
@@ -98,7 +98,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
@@ -202,7 +202,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from A join 
          (select d1.key 
           from B d1 join B d2 on d1.key = d2.key
@@ -328,8 +328,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 4' is a cross product
-Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 4' is a cross product
+Warning: Shuffle Join JOIN[16][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1  group by d1.key) od1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1  group by d1.key) od1
@@ -438,7 +438,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 3' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: explain select * from 
 (select A.key from A  group by key) ss join
 (select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out
index 26bee4e..af4f968 100644
--- a/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out
@@ -32,7 +32,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
 POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join B
@@ -102,7 +102,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
@@ -209,7 +209,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[23][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join 
          (select d1.key 
           from B d1 join B d2 on d1.key = d2.key 
@@ -343,8 +343,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[20][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
@@ -464,7 +464,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from 
 (select A.key from A group by key) ss join 
 (select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1


http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_alt_syntax.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_alt_syntax.q.out b/ql/src/test/results/clientpositive/join_alt_syntax.q.out
index 339e004..17b10bd 100644
--- a/ql/src/test/results/clientpositive/join_alt_syntax.q.out
+++ b/ql/src/test/results/clientpositive/join_alt_syntax.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select p1.p_name, p2.p_name
 from part p1 , part p2
 PREHOOK: type: QUERY
@@ -230,7 +230,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select p1.p_name, p2.p_name, p3.p_name
 from part p1 , part p2 , part p3 
 where p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out
index b4f1eb3..eab6178 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out
@@ -172,7 +172,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 on p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name
 PREHOOK: type: QUERY
@@ -281,7 +281,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 on p2.p_partkey = 1 and p3.p_name = p2.p_name
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out
index 3975b4f..771745f 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out
@@ -176,7 +176,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 
 where p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name
@@ -287,7 +287,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 
 where p2.p_partkey = 1 and p3.p_name = p2.p_name

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
index 82d1b82..8cbc70d 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out
@@ -228,7 +228,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part2 p2 join part3 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name
 PREHOOK: type: QUERY
@@ -337,7 +337,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part2 p2 join part3 p3 on p2_partkey = 1 and p3_name = p2_name
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out
index 297154f..19a89e7 100644
--- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out
+++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out
@@ -232,7 +232,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part2 p2 join part3 p3 
 where p2_partkey + p1.p_partkey = p1.p_partkey and p3_name = p2_name
@@ -343,7 +343,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part2 p2 join part3 p3 
 where p2_partkey = 1 and p3_name = p2_name

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_filters.q.out b/ql/src/test/results/clientpositive/join_filters.q.out
index 79e8b07..b909ad5 100644
--- a/ql/src/test/results/clientpositive/join_filters.q.out
+++ b/ql/src/test/results/clientpositive/join_filters.q.out
@@ -18,7 +18,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE my
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@myinput1
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -774,7 +774,7 @@ NULL	NULL	48	NULL
 NULL	NULL	NULL	135
 NULL	NULL	NULL	35
 UBr9lyqgsjDFvooMgQlZ9w==
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_nulls.q.out b/ql/src/test/results/clientpositive/join_nulls.q.out
index b536985..2401447 100644
--- a/ql/src/test/results/clientpositive/join_nulls.q.out
+++ b/ql/src/test/results/clientpositive/join_nulls.q.out
@@ -18,7 +18,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE my
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@myinput1
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT * FROM myinput1 a JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -36,7 +36,7 @@ POSTHOOK: Input: default@myinput1
 NULL	35	100	100
 NULL	35	48	NULL
 NULL	35	NULL	35
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_reorder.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_reorder.q.out b/ql/src/test/results/clientpositive/join_reorder.q.out
index ba3c8d4..6091a5f 100644
--- a/ql/src/test/results/clientpositive/join_reorder.q.out
+++ b/ql/src/test/results/clientpositive/join_reorder.q.out
@@ -91,9 +91,9 @@ STAGE PLANS:
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
+                  key expressions: (UDFToDouble(_col0) + 1.0) (type: double)
                   sort order: +
-                  Map-reduce partition columns: (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
+                  Map-reduce partition columns: (UDFToDouble(_col0) + 1.0) (type: double)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string)
       Reduce Operator Tree:
@@ -102,7 +102,7 @@ STAGE PLANS:
                Inner Join 0 to 1
           keys:
             0 UDFToDouble(_col0) (type: double)
-            1 (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
+            1 (UDFToDouble(_col0) + 1.0) (type: double)
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_view.q.out b/ql/src/test/results/clientpositive/join_view.q.out
index 57043fb..db68591 100644
--- a/ql/src/test/results/clientpositive/join_view.q.out
+++ b/ql/src/test/results/clientpositive/join_view.q.out
@@ -38,6 +38,7 @@ POSTHOOK: Input: default@invites
 POSTHOOK: Input: default@invites2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@v
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from v where ds='2011-09-01'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from v where ds='2011-09-01'
@@ -61,9 +62,7 @@ STAGE PLANS:
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: '2011-09-01' (type: string)
-                  sort order: +
-                  Map-reduce partition columns: '2011-09-01' (type: string)
+                  sort order: 
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col0 (type: string)
           TableScan
@@ -77,9 +76,7 @@ STAGE PLANS:
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: '2011-09-01' (type: string)
-                  sort order: +
-                  Map-reduce partition columns: '2011-09-01' (type: string)
+                  sort order: 
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col0 (type: int)
       Reduce Operator Tree:
@@ -87,8 +84,8 @@ STAGE PLANS:
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col1 (type: string)
-            1 _col1 (type: string)
+            0 
+            1 
           outputColumnNames: _col0, _col2
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/lineage2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out
index a08094a..39bb6dc 100644
--- a/ql/src/test/results/clientpositive/lineage2.q.out
+++ b/ql/src/test/results/clientpositive/lineage2.q.out
@@ -18,7 +18,7 @@ PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > 10.0) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 150	val_150
 213	val_213

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
index 61acf52..12ae13e 100644
--- a/ql/src/test/results/clientpositive/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/lineage3.q.out
@@ -116,7 +116,7 @@ order by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","database":"default","hash":"afd760470fc5aa6d3e8348dee03af97f","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n  (select ctinyint, cbigint from alltypesorc\n   union all\n   select ctinyint, cbigint from alltypesorc) a\n  inner join\n  alltypesorc b\n  on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,4],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint = alltypesorc.ctinyint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"afd760470fc5aa6d3e8348dee03af97f","queryText":"select a.cbigint, a.ctinyint, b.cint, b.ctinyint\nfrom\n  (select ctinyint, cbigint from alltypesorc\n   union all\n   select ctinyint, cbigint from alltypesorc) a\n  inner join\n  alltypesorc b\n  on (a.ctinyint = b.ctinyint)\nwhere b.ctinyint < 100 and a.cbigint is not null and b.cint is not null\norder by a.cbigint, a.ctinyint, b.cint, b.ctinyint limit 5","edges":[{"sources":[4],"targets":[0],"expression":"cbigint","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"ctinyint","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[5],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,4],"targets":[0,1,2,3],"expression":"(alltypesorc.ctinyint is not null and alltypesorc.cbigint is not null and (alltypesorc.ctinyint < 100))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(ctinyint = alltypesorc.ctinyint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((alltypesorc.ctinyint < 100) and alltypesorc.cint is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.cbigint"},{"id":1,"vertexType":"COLUMN","vertexId":"a.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"b.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"b.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"}]}
 -2147311592	-51	-1071480828	-51
 -2147311592	-51	-1071480828	-51
 -2147311592	-51	-1067683781	-51
@@ -135,7 +135,7 @@ and x.ctinyint + length(c.cstring2) < 1000
 PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","database":"default","hash":"3a12ad24b2622a8958df12d0bdc60f8a","queryText":"select x.ctinyint, x.cint, c.cbigint-100, c.cstring1\nfrom alltypesorc c\njoin (\n   select a.ctinyint ctinyint, b.cint cint\n   from (select * from alltypesorc a where cboolean1=false) a\n   join alltypesorc b on (a.cint = b.cbigint - 224870380)\n ) x on (x.cint = c.cint)\nwhere x.ctinyint > 10\nand x.cint < 4.5\nand x.ctinyint + length(c.cstring2) < 1000","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"expression":"(c.cbigint - UDFToLong(100))","edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3],"expression":"(UDFToDouble(c.cint) < 4.5)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(c.cint = c.cint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((UDFToDouble(c.cint) < 4.5) and c.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[6,5],"targets":[0,1,2,3],"expression":"((c.cbigint - UDFToLong(224870380)) = UDFToLong(c.cint))","edgeType":"PREDICATE"},{"sources":[8,4,5],"targets":[0,1,2,3],"expression":"((c.cboolean1 = false) and (c.ctinyint > 10) and c.cint is not null)","edgeType":"PREDICATE"},{"sources":[4,9],"targets":[0,1,2,3],"expression":"((UDFToInteger(c.ctinyint) + length(c.cstring2)) < 1000)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"x.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"c.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"3a12ad24b2622a8958df12d0bdc60f8a","queryText":"select x.ctinyint, x.cint, c.cbigint-100, c.cstring1\nfrom alltypesorc c\njoin (\n   select a.ctinyint ctinyint, b.cint cint\n   from (select * from alltypesorc a where cboolean1=false) a\n   join alltypesorc b on (a.cint = b.cbigint - 224870380)\n ) x on (x.cint = c.cint)\nwhere x.ctinyint > 10\nand x.cint < 4.5\nand x.ctinyint + length(c.cstring2) < 1000","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"expression":"(c.cbigint - 100)","edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5],"targets":[0,1,2,3],"expression":"(UDFToDouble(c.cint) < 4.5)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1,2,3],"expression":"(c.cint = c.cint)","edgeType":"PREDICATE"},{"sources":[5,6],"targets":[0,1,2,3],"expression":"((UDFToDouble(c.cint) < 4.5) and c.cbigint is not null)","edgeType":"PREDICATE"},{"sources":[6,5],"targets":[0,1,2,3],"expression":"((c.cbigint - 224870380) = UDFToLong(c.cint))","edgeType":"PREDICATE"},{"sources":[8,4,5],"targets":[0,1,2,3],"expression":"((c.cboolean1 = false) and (c.ctinyint > 10) and c.cint is not null)","edgeType":"PREDICATE"},{"sources":[4,9],"targets":[0,1,2,3],"expression":"((UDFToInteger(c.ctinyint) + length(c.cstring2)) < 1000)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"x.ctinyint"},{"id":1,"vertexType":"COLUMN","vertexId":"x.cint"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"c.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"}]}
 11	-654374827	857266369	OEfPnHnIYueoup
 PREHOOK: query: select c1, x2, x3
 from (
@@ -166,7 +166,7 @@ where key in (select key+18 from src1) order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","database":"default","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"UDFToDouble(src1.key) is not null","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"8b9d63653e36ecf4dd425d3cc3de9199","queryText":"select key, value from src1\nwhere key in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"UDFToDouble(src1.key) is not null","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + 18.0))","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + 18.0) is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 273	val_273
 PREHOOK: query: select * from src1 a
@@ -178,15 +178,15 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","database":"default","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"((UDFToDouble(a.key) > UDFToDouble(300)) and UDFToDouble(a.key) is not null)","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"UDFToDouble((UDFToInteger(b.ctinyint) + 300)) is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > 300.0)","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"UDFToDouble((UDFToInteger(b.ctinyint) + 300)) is not null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 311	val_311
-Warning: Shuffle Join JOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select key, value from src1
 where key not in (select key+18 from src1) order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","database":"default","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + UDFToDouble(18)) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"true","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + UDFToDouble(18)))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","verte
 xId":"default.src1"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"9b488fe1d7cf018aad3825173808cd36","queryText":"select key, value from src1\nwhere key not in (select key+18 from src1) order by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) + 18.0) is null","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(count(*) = 0)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(src1.key) = (UDFToDouble(src1.key) + 18.0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"TABLE","vertexId":"default.src1"}]}
 PREHOOK: query: select * from src1 a
 where not exists
   (select cint from alltypesorc b
@@ -196,7 +196,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"defa
 ult.alltypesorc.ctinyint"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"53191056e05af9080a30de853e8cea9c","queryText":"select * from src1 a\nwhere not exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > 300.0)","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"(UDFToInteger(b.ctinyint) + 300) is null","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltype
 sorc.ctinyint"}]}
 369	
 401	val_401
 406	val_406

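These lineage hunks show the visible effects of folding constants once, during CBO, instead of re-running Hive's ConstantPropagate on the optimized plan: literal casts print pre-folded (UDFToDouble(18) becomes 18.0, UDFToDouble(300) becomes 300.0), an is-not-null conjunct and a vacuous "true" predicate edge drop out as redundant, and the removed operator renumbers the cross-product warning from JOIN[16] to JOIN[15]. A minimal sketch for reproducing this kind of lineage record (assuming a build with this patch, the standard src1 test table, and the LineageLogger post-execution hook):

  set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;
  select key, value from src1
  where key in (select key + 18 from src1) order by key;
  -- the logged PREDICATE edge now prints (UDFToDouble(src1.key) = (UDFToDouble(src1.key) + 18.0))
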
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
index 6a9b4a1..b415ba5 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
@@ -73,7 +73,7 @@ SELECT key+11 FROM src WHERE key=484
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@fact_tz@ds=1/hr=2
-POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).x EXPRESSION []
 #### A masked pattern was here ####
 PREHOOK: query: -- switch fact_daily to skewed table and point its location to /fact_tz/ds=1
 alter table fact_daily skewed by (x) on (484)

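Here the lineage for fact_tz PARTITION(ds=1,hr=2).x collapses from an EXPRESSION over src.key to EXPRESSION []: under the filter key = 484, the projected key + 11 is folded to a constant at plan time, so the written column no longer derives from any source column. A sketch of the statement this hunk covers (assuming the test's fact_tz table partitioned by ds and hr, and the standard src table):

  insert overwrite table fact_tz partition (ds='1', hr='2')
  select key + 11 from src where key = 484;
  -- key is a string, so key + 11 folds to the double constant 495.0; lineage: EXPRESSION []
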
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
index 8517a52..45f2481 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
@@ -76,7 +76,7 @@ SELECT key+11, value FROM src WHERE key=484
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@fact_tz@ds=1/hr=2
-POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).x EXPRESSION []
 POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 #### A masked pattern was here ####
 PREHOOK: query: -- switch fact_daily to skewed table and point its location to /fact_tz/ds=1
@@ -517,10 +517,10 @@ STAGE PLANS:
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: 484 (type: int)
+                    key expressions: _col0 (type: int)
                     null sort order: a
                     sort order: +
-                    Map-reduce partition columns: 484 (type: int)
+                    Map-reduce partition columns: _col0 (type: int)
                     Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                     tag: -1
                     value expressions: _col1 (type: bigint)
@@ -577,7 +577,7 @@ STAGE PLANS:
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: 484 (type: int)
+          keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE

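oneskew_2 repeats the lineage change for x and adds the plan-level counterpart: where ConstantPropagate used to push the literal 484 into the shuffle key and the merge-partial Group By, the plan now keeps column references (_col0 and KEY._col0). A sketch of the query shape behind this hunk (assuming the test's fact_daily table with an int-comparable column x; not the exact test query):

  explain
  select x, count(1) from fact_daily where x = 484 group by x;
  -- pre-patch:  keys: 484 (type: int)
  -- post-patch: keys: KEY._col0 (type: int)
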
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out
index 9ba574d..3722d9e 100644
--- a/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out
@@ -1,3 +1,4 @@
+Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 with q1 as (select * from src where key= '5')
 select a.key
@@ -33,9 +34,7 @@ STAGE PLANS:
                     Select Operator
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: '5' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: '5' (type: string)
+                        sort order: 
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -50,9 +49,7 @@ STAGE PLANS:
                     Select Operator
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: '5' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: '5' (type: string)
+                        sort order: 
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -63,8 +60,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 '5' (type: string)
-                  1 '5' (type: string)
+                  0 
+                  1 
                 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: '5' (type: string)

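Both join inputs here come from the same CTE filtered to key = '5', so the equi-join key is the constant '5' on each side. With constant keys no longer propagated into the join, the degenerate key is dropped (empty keys, blank sort order) and the plan honestly reports what this always was semantically: a cross product of the filtered rows. cte_mat_2 below shows the identical change. A sketch of the query shape (assuming the standard src test table):

  with q1 as (select * from src where key = '5')
  select a.key
  from q1 a join q1 b on a.key = b.key;
  -- plan: Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] ... is a cross product
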
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out
index 9ba574d..3722d9e 100644
--- a/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out
@@ -1,3 +1,4 @@
+Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 with q1 as (select * from src where key= '5')
 select a.key
@@ -33,9 +34,7 @@ STAGE PLANS:
                     Select Operator
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: '5' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: '5' (type: string)
+                        sort order: 
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -50,9 +49,7 @@ STAGE PLANS:
                     Select Operator
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: '5' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: '5' (type: string)
+                        sort order: 
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -63,8 +60,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 '5' (type: string)
-                  1 '5' (type: string)
+                  0 
+                  1 
                 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: '5' (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
index 9f07718..56618d1 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
@@ -1633,12 +1633,12 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
@@ -1662,7 +1662,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1742,9 +1742,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -1790,7 +1790,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0) (type: double)
                   1 _col0 (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -1891,9 +1891,9 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -1905,7 +1905,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1985,9 +1985,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -2018,7 +2018,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0) (type: double)
                   1 _col0 (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -2112,9 +2112,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      key expressions: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                       sort order: +
-                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -2160,7 +2160,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                  0 UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                   1 UDFToString(_col0) (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -2226,6 +2226,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -2250,17 +2251,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      sort order: 
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Map 4 
@@ -2269,16 +2266,18 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    keys: '2008-04-08' (type: string)
-                    mode: hash
-                    outputColumnNames: _col0
+                  Select Operator
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: '2008-04-08' (type: string)
-                      sort order: +
-                      Map-reduce partition columns: '2008-04-08' (type: string)
+                    Group By Operator
+                      keys: '2008-04-08' (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
         Reducer 2 
@@ -2288,9 +2287,9 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col0 (type: string)
-                  1 '2008-04-08' (type: string)
-                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+                  0 
+                  1 
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -2319,32 +2318,15 @@ STAGE PLANS:
             Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
-                keys: '2008-04-08' (type: string)
+                keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: '2008-04-08' (type: string)
-                    sort order: +
-                    Map-reduce partition columns: '2008-04-08' (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    sort order: 
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Dynamic Partitioning Event Operator
-                        Target column: ds (string)
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -2352,21 +2334,18 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
@@ -2382,7 +2361,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- non-equi join
 EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
@@ -2483,7 +2462,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -4742,7 +4721,7 @@ STAGE PLANS:
                            Inner Join 0 to 1
                       keys:
                         0 UDFToDouble(_col0) (type: double)
-                        1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                       input vertices:
                         1 Map 3
                       Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
@@ -4771,12 +4750,12 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
@@ -4864,7 +4843,7 @@ STAGE PLANS:
                       condition map:
                            Inner Join 0 to 1
                       keys:
-                        0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                        0 (UDFToDouble(_col0) * 2.0) (type: double)
                         1 _col0 (type: double)
                       input vertices:
                         1 Map 3
@@ -4969,6 +4948,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
+Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -4984,60 +4964,72 @@ STAGE PLANS:
     Tez
 #### A masked pattern was here ####
       Edges:
-        Map 1 <- Reducer 4 (BROADCAST_EDGE)
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 4 <- Map 3 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (BROADCAST_EDGE), Map 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Map Join Operator
-                      condition map:
-                           Inner Join 0 to 1
-                      keys:
-                        0 _col0 (type: string)
-                        1 '2008-04-08' (type: string)
-                      input vertices:
-                        1 Reducer 4
-                      Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
-        Map 3 
+        Map 2 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    keys: '2008-04-08' (type: string)
-                    mode: hash
-                    outputColumnNames: _col0
+                  Select Operator
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: '2008-04-08' (type: string)
-                      sort order: +
-                      Map-reduce partition columns: '2008-04-08' (type: string)
+                    Group By Operator
+                      keys: '2008-04-08' (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
-        Reducer 2 
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    keys:
+                      0 
+                      1 
+                    input vertices:
+                      0 Map 1
+                    Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+        Reducer 4 
             Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
@@ -5052,36 +5044,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Execution mode: llap
-            Reduce Operator Tree:
-              Group By Operator
-                keys: '2008-04-08' (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: '2008-04-08' (type: string)
-                    sort order: +
-                    Map-reduce partition columns: '2008-04-08' (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Dynamic Partitioning Event Operator
-                        Target column: ds (string)
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -5089,21 +5051,18 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'

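The dynamic_partition_pruning hunks combine two effects. First, arithmetic over literals is pre-folded, so UDFToDouble(2) prints as 2.0 throughout the join keys. Second, for the query joining srcpart to a grouped subquery on ds with s.`date` = '2008-04-08', the constant predicate is now pushed into both scans as a static filter (filterExpr: (ds = '2008-04-08')); the join key degenerates to a constant and is dropped, which triggers the new cross-product warnings, makes the Dynamic Partitioning Event Operator unnecessary, and removes the 2008-04-09 partitions from the PREHOOK/POSTHOOK inputs because pruning now happens at compile time. The query itself, as it appears in the test (standard srcpart table, partitioned by ds and hr):

  explain
  select count(*)
  from srcpart
  join (select ds as ds, ds as `date` from srcpart group by ds) s
    on (srcpart.ds = s.ds)
  where s.`date` = '2008-04-08';
  -- post-patch: filterExpr: (ds = '2008-04-08') on both scans, no Dynamic Partitioning Event Operator
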
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
index 8ed8ab4..b0024b3 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
@@ -574,7 +574,7 @@ bar
 baz
 baz
 baz
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT agg.amount
 FROM agg_01 agg,
 dim_shops d1
@@ -651,7 +651,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT agg.amount
 FROM agg_01 agg,
 dim_shops d1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
index c750dc2..5bfc5e7 100644
--- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
@@ -1297,7 +1297,7 @@ POSTHOOK: Lineage: decimal_mapjoin.cdecimal1 EXPRESSION [(alltypesorc)alltypesor
 POSTHOOK: Lineage: decimal_mapjoin.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_mapjoin.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_mapjoin.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1380,7 +1380,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1495,7 +1495,7 @@ POSTHOOK: Input: default@decimal_mapjoin
 6981	6981	-515.6210729730	NULL
 6981	6981	-515.6210729730	NULL
 6981	6981	-515.6210729730	NULL
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1578,7 +1578,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
index 98ba8dd..c9417f4 100644
--- a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -186,7 +186,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -315,7 +315,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -468,7 +468,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -602,7 +602,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -734,7 +734,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)

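In tez_dynpart_hashjoin_1 the filter gets more verbose, not less: the cint is not null guard generated for the equi-join key is no longer folded away as redundant with cint BETWEEN 1000000 AND 3000000, evidently because that simplification ran in the now-restricted post-CBO pass. The selected rows are identical. A hypothetical query of this shape (assuming the standard alltypesorc test table; not the exact test query):

  explain
  select count(*)
  from alltypesorc a join alltypesorc b on a.cint = b.cint
  where a.cint between 1000000 and 3000000 and a.cbigint is not null;
  -- post-patch filter: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
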
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/tez_self_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_self_join.q.out b/ql/src/test/results/clientpositive/llap/tez_self_join.q.out
index 68f231c..48c1b09 100644
--- a/ql/src/test/results/clientpositive/llap/tez_self_join.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_self_join.q.out
@@ -42,6 +42,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__2
 POSTHOOK: Output: default@tez_self_join2
 POSTHOOK: Lineage: tez_self_join2.id1 EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 select s.id2, s.id3
 from  
@@ -90,9 +91,7 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'ab' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: 'ab' (type: string)
+                        sort order: 
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: int), _col2 (type: string)
             Execution mode: llap
@@ -108,9 +107,7 @@ STAGE PLANS:
                     Select Operator
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'ab' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: 'ab' (type: string)
+                        sort order: 
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
             LLAP IO: no inputs
@@ -140,16 +137,20 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 'ab' (type: string)
-                  1 'ab' (type: string)
+                  0 
+                  1 
                 outputColumnNames: _col0, _col2
                 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                Select Operator
+                  expressions: _col0 (type: int), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string)
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: string)
         Reducer 3 
             Execution mode: llap
             Reduce Operator Tree:
@@ -159,10 +160,10 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                outputColumnNames: _col2
+                outputColumnNames: _col1
                 Statistics: Num rows: 3 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: 'ab' (type: string), _col2 (type: string)
+                  expressions: 'ab' (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 3 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -179,6 +180,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select s.id2, s.id3
 from  
 (

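tez_self_join shows the degenerate-key pattern plus its knock-on effects: with the constant 'ab' join key dropped, the first join becomes a flagged cross product, an explicit Select Operator re-projects the surviving columns, and downstream column references shift from _col2 to _col1. A hypothetical query of this shape (illustrative tables t1 and t2, not the actual test tables):

  explain
  select a.c2, b.c2
  from (select * from t1 where c1 = 'ab') a
  join (select * from t2 where c1 = 'ab') b on a.c1 = b.c1;
  -- both key sides are the constant 'ab', so the key is dropped and the join is a cross product
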
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out b/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out
index 14a273b..ce86640 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union_dynamic_partition.q.out
@@ -66,21 +66,17 @@ STAGE PLANS:
                   alias: dummy
                   Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: 1 (type: int)
-                    outputColumnNames: _col0
+                    expressions: 1 (type: int), '2014' (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: _col0 (type: int), '2014' (type: string)
-                      outputColumnNames: _col0, _col1
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.partunion1
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.partunion1
             Execution mode: llap
             LLAP IO: no inputs
         Map 3 
@@ -89,21 +85,17 @@ STAGE PLANS:
                   alias: dummy
                   Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: 2 (type: int)
-                    outputColumnNames: _col0
+                    expressions: 2 (type: int), '2014' (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: _col0 (type: int), '2014' (type: string)
-                      outputColumnNames: _col0, _col1
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.partunion1
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.partunion1
             Execution mode: llap
             LLAP IO: no inputs
         Union 2 
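
The hunks above fold the constant-only Select Operators into their parents: the partition value '2014' is now projected alongside each branch constant, so the redundant inner Select Operator disappears from both union branches. As a minimal sketch of the kind of query that yields this plan (the names dummy and partunion1 come from the plan itself; the exact test query is an assumption):

-- sketch only: reconstructs a dynamic-partition union insert matching the plan above
create table dummy (i int);
insert into table dummy values (1);

create table partunion1 (id1 int) partitioned by (part1 string);

insert into table partunion1 partition (part1)
select temps.* from (
  select 1 as id1, '2014' as part1 from dummy
  union all
  select 2 as id1, '2014' as part1 from dummy
) temps;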


[53/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
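
For context: HIVE-13549 drops the paired .q.java1.7.out / .q.java1.8.out golden files, presumably because Hive2 no longer maintains separate expected output per JDK. Both files deleted below begin with the same directive header; as a sketch, a test whose output was JDK-specific was marked like this (directives and DDL copied from the deleted files, remainder elided):

-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
-- SORT_QUERY_RESULTS
-- JAVA_VERSION_SPECIFIC_OUTPUT

-- create a skewed table
create table list_bucketing_static_part (key String, value String)
    partitioned by (ds String, hr String)
    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
    stored as DIRECTORIES
    STORED AS RCFILE;
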
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
deleted file mode 100644
index c15c6a2..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
+++ /dev/null
@@ -1,813 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- after merge
--- 142 000000_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
--- after merge
--- 118 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- after merge
--- 142 000000_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
--- after merge
--- 118 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	6                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10898               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10786               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 4
-              numRows 1000
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9624
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10786
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_static_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- clean up
-drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
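
The java1.8 golden file removed next differs from the java1.7 file above in only a few details: it carries ABSTRACT SYNTAX TREE dumps in the explain output, prints COLUMN_STATS_ACCURATE as a bare true rather than the JSON form, and, the genuinely JDK-sensitive part, emits the Skewed Value to Truncated Path map in a different hash-iteration order ([103, val_103] first in the java1.7 file, [484, val_484] first in the java1.8 file). The statement whose output exposes that ordering, verbatim from both files:

desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11');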

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out
deleted file mode 100644
index d484626..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.8.out
+++ /dev/null
@@ -1,915 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- after merge
--- 142 000000_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
--- after merge
--- 118 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- after merge
--- 142 000000_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
--- after merge
--- 118 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	6                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10898               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10786               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_static_part
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            and
-               and
-                  =
-                     TOK_TABLE_OR_COL
-                        ds
-                     '2008-04-08'
-                  =
-                     TOK_TABLE_OR_COL
-                        hr
-                     '11'
-               =
-                  TOK_TABLE_OR_COL
-                     key
-                  '484'
-            =
-               TOK_TABLE_OR_COL
-                  value
-               'val_484'
-
-
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 4
-              numRows 1000
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9624
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10786
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_static_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- clean up
-drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part


[64/66] [abbrv] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
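
Context for the reordered COLUMN_STATS_ACCURATE values in the hunks below: JDK7 and
JDK8 HashMaps iterate their entries in different orders, so JSON rendered from an
unordered map can differ between JDKs even when the contents are identical, which is
what broke these golden-file comparisons. A minimal sketch of the underlying idea --
not the actual Hive patch; the class name, toJson helper, and TreeMap approach are
illustrative assumptions only -- is to emit map entries in sorted key order so the
rendering is deterministic on any JDK:

import java.util.Map;
import java.util.TreeMap;

public class StatsJsonDemo {
    // Render a flat string map as JSON with keys in sorted order.
    // (The real COLUMN_STATS value is a nested object; it is elided here.)
    static String toJson(Map<String, String> m) {
        StringBuilder sb = new StringBuilder("{");
        boolean first = true;
        for (Map.Entry<String, String> e : new TreeMap<>(m).entrySet()) {
            if (!first) sb.append(',');
            first = false;
            sb.append('"').append(e.getKey()).append("\":\"")
              .append(e.getValue()).append('"');
        }
        return sb.append('}').toString();
    }

    public static void main(String[] args) {
        Map<String, String> stats = new java.util.HashMap<>();
        // Insertion order is irrelevant: TreeMap sorts before serialization.
        stats.put("COLUMN_STATS", "...");
        stats.put("BASIC_STATS", "true");
        System.out.println(toJson(stats));
        // Always prints {"BASIC_STATS":"true","COLUMN_STATS":"..."}
    }
}

With keys sorted, the property renders the same way on every JDK, matching the
updated expected output ({"BASIC_STATS":"true","COLUMN_STATS":{...}}) in the
diffs that follow.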
http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out b/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
index 9e9e61f..4352914 100644
--- a/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
+++ b/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
@@ -70,7 +70,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -116,7 +116,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -259,7 +259,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -305,7 +305,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -351,7 +351,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -397,7 +397,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
index e03c055..f3fd8f8 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -75,7 +75,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
index de1d6f4..df42672 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out
@@ -87,7 +87,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -133,7 +133,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
index eabf9d9..6377e95 100644
--- a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out
@@ -21,7 +21,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -109,7 +109,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/reduce_deduplicate.q.out b/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
index 075336b..dfcbd7d 100644
--- a/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
+++ b/ql/src/test/results/clientpositive/reduce_deduplicate.q.out
@@ -49,7 +49,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -69,7 +69,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/regexp_extract.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/regexp_extract.q.out b/ql/src/test/results/clientpositive/regexp_extract.q.out
index 7026df3..fb7794c 100644
--- a/ql/src/test/results/clientpositive/regexp_extract.q.out
+++ b/ql/src/test/results/clientpositive/regexp_extract.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -88,7 +88,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -317,7 +317,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -337,7 +337,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/router_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/router_join_ppr.q.out b/ql/src/test/results/clientpositive/router_join_ppr.q.out
index 1f79be9..bf46fe9 100644
--- a/ql/src/test/results/clientpositive/router_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/router_join_ppr.q.out
@@ -79,7 +79,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -99,7 +99,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -126,7 +126,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -172,7 +172,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -218,7 +218,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -264,7 +264,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -474,7 +474,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -494,7 +494,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -521,7 +521,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -567,7 +567,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -770,7 +770,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -790,7 +790,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -817,7 +817,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -863,7 +863,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1062,7 +1062,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1082,7 +1082,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1109,7 +1109,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1155,7 +1155,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/sample1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample1.q.out b/ql/src/test/results/clientpositive/sample1.q.out
index 57e61b9..f57519b 100644
--- a/ql/src/test/results/clientpositive/sample1.q.out
+++ b/ql/src/test/results/clientpositive/sample1.q.out
@@ -87,7 +87,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/sample2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample2.q.out b/ql/src/test/results/clientpositive/sample2.q.out
index 92f0d5a..096805d 100644
--- a/ql/src/test/results/clientpositive/sample2.q.out
+++ b/ql/src/test/results/clientpositive/sample2.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -106,7 +106,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/sample4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample4.q.out b/ql/src/test/results/clientpositive/sample4.q.out
index b4e58c5..72395c9 100644
--- a/ql/src/test/results/clientpositive/sample4.q.out
+++ b/ql/src/test/results/clientpositive/sample4.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -106,7 +106,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/sample5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample5.q.out b/ql/src/test/results/clientpositive/sample5.q.out
index c786f21..147a567 100644
--- a/ql/src/test/results/clientpositive/sample5.q.out
+++ b/ql/src/test/results/clientpositive/sample5.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -107,7 +107,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/sample6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample6.q.out b/ql/src/test/results/clientpositive/sample6.q.out
index 519647f..a34258d 100644
--- a/ql/src/test/results/clientpositive/sample6.q.out
+++ b/ql/src/test/results/clientpositive/sample6.q.out
@@ -83,7 +83,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -104,7 +104,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value
@@ -652,7 +652,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -673,7 +673,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value
@@ -1021,7 +1021,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -1042,7 +1042,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value
@@ -1643,7 +1643,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -1664,7 +1664,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value
@@ -2108,7 +2108,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -2129,7 +2129,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value
@@ -2560,7 +2560,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -2581,7 +2581,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 4
                 bucket_field_name key
                 columns key,value
@@ -2606,7 +2606,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -2627,7 +2627,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 4
                 bucket_field_name key
                 columns key,value
@@ -2859,7 +2859,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 4
               bucket_field_name key
               columns key,value
@@ -2880,7 +2880,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 4
                 bucket_field_name key
                 columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/sample7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample7.q.out b/ql/src/test/results/clientpositive/sample7.q.out
index 2352cdc..51a45dd 100644
--- a/ql/src/test/results/clientpositive/sample7.q.out
+++ b/ql/src/test/results/clientpositive/sample7.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -105,7 +105,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/sample8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample8.q.out b/ql/src/test/results/clientpositive/sample8.q.out
index dd55fa0..b316331 100644
--- a/ql/src/test/results/clientpositive/sample8.q.out
+++ b/ql/src/test/results/clientpositive/sample8.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -114,7 +114,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -160,7 +160,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -206,7 +206,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/sample9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample9.q.out b/ql/src/test/results/clientpositive/sample9.q.out
index 14a512a..a4c3ff6 100644
--- a/ql/src/test/results/clientpositive/sample9.q.out
+++ b/ql/src/test/results/clientpositive/sample9.q.out
@@ -58,7 +58,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count 2
               bucket_field_name key
               columns key,value
@@ -79,7 +79,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count 2
                 bucket_field_name key
                 columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/schema_evol_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/schema_evol_stats.q.out b/ql/src/test/results/clientpositive/schema_evol_stats.q.out
index 63dab2e..63b4c19 100644
--- a/ql/src/test/results/clientpositive/schema_evol_stats.q.out
+++ b/ql/src/test/results/clientpositive/schema_evol_stats.q.out
@@ -109,7 +109,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	40                  
@@ -150,7 +150,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	56                  
@@ -309,7 +309,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	384                 
@@ -350,7 +350,7 @@ Database:           	default
 Table:              	partitioned1        	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"b\":\"true\",\"c\":\"true\",\"d\":\"true\"}}
 	numFiles            	1                   
 	numRows             	4                   
 	rawDataSize         	732                 

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/serde_user_properties.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/serde_user_properties.q.out b/ql/src/test/results/clientpositive/serde_user_properties.q.out
index 1dbc274..d5b81ed 100644
--- a/ql/src/test/results/clientpositive/serde_user_properties.q.out
+++ b/ql/src/test/results/clientpositive/serde_user_properties.q.out
@@ -101,7 +101,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -121,7 +121,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -225,7 +225,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -246,7 +246,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -351,7 +351,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -372,7 +372,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/bucket2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket2.q.out b/ql/src/test/results/clientpositive/spark/bucket2.q.out
index f9d4782..dd23a25 100644
--- a/ql/src/test/results/clientpositive/spark/bucket2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket2.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -77,7 +77,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket3.q.out b/ql/src/test/results/clientpositive/spark/bucket3.q.out
index 39d9c33..f4acd71 100644
--- a/ql/src/test/results/clientpositive/spark/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket3.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -77,7 +77,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/bucket4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/bucket4.q.out b/ql/src/test/results/clientpositive/spark/bucket4.q.out
index 68f8143..b1ef928 100644
--- a/ql/src/test/results/clientpositive/spark/bucket4.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket4.q.out
@@ -54,7 +54,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -74,7 +74,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out b/ql/src/test/results/clientpositive/spark/ctas.q.out
index 5396ada..f7165fc 100644
--- a/ql/src/test/results/clientpositive/spark/ctas.q.out
+++ b/ql/src/test/results/clientpositive/spark/ctas.q.out
@@ -720,7 +720,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -740,7 +740,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out b/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
index c8503cd..8cefe46 100644
--- a/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -73,7 +73,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out b/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out
index f97f63e..beae497 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out
@@ -73,7 +73,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -119,7 +119,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out
index c833657..2ad4d68 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out
@@ -73,7 +73,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -119,7 +119,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out b/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
index a2c2ced..f1e1027 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
@@ -66,7 +66,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -112,7 +112,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out
index 531854b..5251241 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out
@@ -66,7 +66,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -112,7 +112,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/input_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/input_part2.q.out b/ql/src/test/results/clientpositive/spark/input_part2.q.out
index 4799a7f..36bb40f 100644
--- a/ql/src/test/results/clientpositive/spark/input_part2.q.out
+++ b/ql/src/test/results/clientpositive/spark/input_part2.q.out
@@ -136,7 +136,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -182,7 +182,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join17.q.out b/ql/src/test/results/clientpositive/spark/join17.q.out
index 3acf7f9..a7103cb 100644
--- a/ql/src/test/results/clientpositive/spark/join17.q.out
+++ b/ql/src/test/results/clientpositive/spark/join17.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -82,7 +82,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -134,7 +134,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -154,7 +154,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join26.q.out b/ql/src/test/results/clientpositive/spark/join26.q.out
index 4967ab6..cacfe42 100644
--- a/ql/src/test/results/clientpositive/spark/join26.q.out
+++ b/ql/src/test/results/clientpositive/spark/join26.q.out
@@ -60,7 +60,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -80,7 +80,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -127,7 +127,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -147,7 +147,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -245,7 +245,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join32.q.out b/ql/src/test/results/clientpositive/spark/join32.q.out
index be29cd5..7cecbc6 100644
--- a/ql/src/test/results/clientpositive/spark/join32.q.out
+++ b/ql/src/test/results/clientpositive/spark/join32.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -87,7 +87,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -137,7 +137,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -157,7 +157,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -267,7 +267,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
index 8a99a56..97c520c 100644
--- a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out
@@ -75,7 +75,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -95,7 +95,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -145,7 +145,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -165,7 +165,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -275,7 +275,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -518,7 +518,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -538,7 +538,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -604,7 +604,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -624,7 +624,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -674,7 +674,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -694,7 +694,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -801,7 +801,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -821,7 +821,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1043,7 +1043,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1112,7 +1112,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1132,7 +1132,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1243,7 +1243,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1263,7 +1263,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1487,7 +1487,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1552,7 +1552,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1572,7 +1572,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1683,7 +1683,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1703,7 +1703,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join33.q.out b/ql/src/test/results/clientpositive/spark/join33.q.out
index be29cd5..7cecbc6 100644
--- a/ql/src/test/results/clientpositive/spark/join33.q.out
+++ b/ql/src/test/results/clientpositive/spark/join33.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -87,7 +87,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -137,7 +137,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -157,7 +157,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -267,7 +267,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'


[35/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
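
For context on HIVE-13549: q-files carrying the "-- JAVA_VERSION_SPECIFIC_OUTPUT" marker (visible in the deleted file below) kept one golden file per JDK, such as varchar_udf1.q.java1.7.out; this commit collapses them to a single .q.out per query. A rough sketch of that per-JDK file selection, assuming the suffix comes from java.specification.version (goldenFor is a hypothetical helper for illustration, not Hive's QTestUtil API):

    public class GoldenFileName {
        // Before HIVE-13549, q-files flagged with the marker
        // "-- JAVA_VERSION_SPECIFIC_OUTPUT" kept one golden file per JDK.
        // goldenFor is a hypothetical helper, not Hive's actual API.
        static String goldenFor(String qfile, boolean jdkSpecific) {
            if (!jdkSpecific) {
                return qfile + ".out";              // e.g. varchar_udf1.q.out
            }
            // "1.7" on JDK 7, "1.8" on JDK 8
            String spec = System.getProperty("java.specification.version");
            return qfile + ".java" + spec + ".out"; // e.g. varchar_udf1.q.java1.7.out
        }

        public static void main(String[] args) {
            System.out.println(goldenFor("varchar_udf1.q", true));
            System.out.println(goldenFor("varchar_udf1.q", false));
        }
    }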
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out b/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
deleted file mode 100644
index 459d93b..0000000
--- a/ql/src/test/results/clientpositive/varchar_udf1.q.java1.7.out
+++ /dev/null
@@ -1,457 +0,0 @@
-PREHOOK: query: drop table varchar_udf_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table varchar_udf_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_udf_1
-PREHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@varchar_udf_1
-POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with varchar support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with varchar support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-238val_238	238val_238	true
-PREHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-VAL_238	VAL_238	true
-PREHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-118	118	true
-PREHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-238|val_238	238|val_238	true
-PREHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  instr(c2, '_'),
-  instr(c4, '_'),
-  instr(c2, '_') = instr(c4, '_')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  instr(c2, '_'),
-  instr(c4, '_'),
-  instr(c2, '_') = instr(c4, '_')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-4	4	true
-PREHOOK: query: select
-  length(c2),
-  length(c4),
-  length(c2) = length(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  length(c2),
-  length(c4),
-  length(c2) = length(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-7	7	true
-PREHOOK: query: select
-  locate('a', 'abcdabcd', 3),
-  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
-  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  locate('a', 'abcdabcd', 3),
-  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
-  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-5	5	true
-PREHOOK: query: select
-  lpad(c2, 15, ' '),
-  lpad(c4, 15, ' '),
-  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  lpad(c2, 15, ' '),
-  lpad(c4, 15, ' '),
-  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-        val_238	        val_238	true
-PREHOOK: query: select
-  ltrim(c2),
-  ltrim(c4),
-  ltrim(c2) = ltrim(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  ltrim(c2),
-  ltrim(c4),
-  ltrim(c2) = ltrim(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  c2 regexp 'val',
-  c4 regexp 'val',
-  (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  c2 regexp 'val',
-  c4 regexp 'val',
-  (c2 regexp 'val') = (c4 regexp 'val')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-true	true	true
-PREHOOK: query: select
-  regexp_extract(c2, 'val_([0-9]+)', 1),
-  regexp_extract(c4, 'val_([0-9]+)', 1),
-  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp_extract(c2, 'val_([0-9]+)', 1),
-  regexp_extract(c4, 'val_([0-9]+)', 1),
-  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-238	238	true
-PREHOOK: query: select
-  regexp_replace(c2, 'val', 'replaced'),
-  regexp_replace(c4, 'val', 'replaced'),
-  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp_replace(c2, 'val', 'replaced'),
-  regexp_replace(c4, 'val', 'replaced'),
-  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-replaced_238	replaced_238	true
-PREHOOK: query: select
-  reverse(c2),
-  reverse(c4),
-  reverse(c2) = reverse(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  reverse(c2),
-  reverse(c4),
-  reverse(c2) = reverse(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-832_lav	832_lav	true
-PREHOOK: query: select
-  rpad(c2, 15, ' '),
-  rpad(c4, 15, ' '),
-  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  rpad(c2, 15, ' '),
-  rpad(c4, 15, ' '),
-  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238        	val_238        	true
-PREHOOK: query: select
-  rtrim(c2),
-  rtrim(c4),
-  rtrim(c2) = rtrim(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  rtrim(c2),
-  rtrim(c4),
-  rtrim(c2) = rtrim(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  sentences('See spot run.  See jane run.'),
-  sentences(cast('See spot run.  See jane run.' as varchar(50)))
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  sentences('See spot run.  See jane run.'),
-  sentences(cast('See spot run.  See jane run.' as varchar(50)))
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-[["See","spot","run"],["See","jane","run"]]	[["See","spot","run"],["See","jane","run"]]
-PREHOOK: query: select
-  split(c2, '_'),
-  split(c4, '_')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  split(c2, '_'),
-  split(c4, '_')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-["val","238"]	["val","238"]
-PREHOOK: query: select 
-  str_to_map('a:1,b:2,c:3',',',':'),
-  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select 
-  str_to_map('a:1,b:2,c:3',',',':'),
-  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-{"b":"2","a":"1","c":"3"}	{"b":"2","a":"1","c":"3"}
-PREHOOK: query: select
-  substr(c2, 1, 3),
-  substr(c4, 1, 3),
-  substr(c2, 1, 3) = substr(c4, 1, 3)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  substr(c2, 1, 3),
-  substr(c4, 1, 3),
-  substr(c2, 1, 3) = substr(c4, 1, 3)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val	val	true
-PREHOOK: query: select
-  trim(c2),
-  trim(c4),
-  trim(c2) = trim(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  trim(c2),
-  trim(c4),
-  trim(c2) = trim(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: -- Aggregate Functions
-select
-  compute_stats(c2, 16),
-  compute_stats(c4, 16)
-from varchar_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Aggregate Functions
-select
-  compute_stats(c2, 16),
-  compute_stats(c4, 16)
-from varchar_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}	{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}
-PREHOOK: query: select
-  min(c2),
-  min(c4)
-from varchar_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  min(c2),
-  min(c4)
-from varchar_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238
-PREHOOK: query: select
-  max(c2),
-  max(c4)
-from varchar_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  max(c2),
-  max(c4)
-from varchar_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238
-PREHOOK: query: drop table varchar_udf_1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@varchar_udf_1
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: drop table varchar_udf_1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@varchar_udf_1
-POSTHOOK: Output: default@varchar_udf_1

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/varchar_udf1.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/varchar_udf1.q.java1.8.out b/ql/src/test/results/clientpositive/varchar_udf1.q.java1.8.out
deleted file mode 100644
index ace8568..0000000
--- a/ql/src/test/results/clientpositive/varchar_udf1.q.java1.8.out
+++ /dev/null
@@ -1,457 +0,0 @@
-PREHOOK: query: drop table varchar_udf_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table varchar_udf_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@varchar_udf_1
-PREHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: insert overwrite table varchar_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@varchar_udf_1
-POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with varchar support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with varchar support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-238val_238	238val_238	true
-PREHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-VAL_238	VAL_238	true
-PREHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-118	118	true
-PREHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-238|val_238	238|val_238	true
-PREHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  instr(c2, '_'),
-  instr(c4, '_'),
-  instr(c2, '_') = instr(c4, '_')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  instr(c2, '_'),
-  instr(c4, '_'),
-  instr(c2, '_') = instr(c4, '_')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-4	4	true
-PREHOOK: query: select
-  length(c2),
-  length(c4),
-  length(c2) = length(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  length(c2),
-  length(c4),
-  length(c2) = length(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-7	7	true
-PREHOOK: query: select
-  locate('a', 'abcdabcd', 3),
-  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
-  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  locate('a', 'abcdabcd', 3),
-  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
-  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-5	5	true
-PREHOOK: query: select
-  lpad(c2, 15, ' '),
-  lpad(c4, 15, ' '),
-  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  lpad(c2, 15, ' '),
-  lpad(c4, 15, ' '),
-  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-        val_238	        val_238	true
-PREHOOK: query: select
-  ltrim(c2),
-  ltrim(c4),
-  ltrim(c2) = ltrim(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  ltrim(c2),
-  ltrim(c4),
-  ltrim(c2) = ltrim(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  regexp(c2, 'val'),
-  regexp(c4, 'val'),
-  regexp(c2, 'val') = regexp(c4, 'val')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp(c2, 'val'),
-  regexp(c4, 'val'),
-  regexp(c2, 'val') = regexp(c4, 'val')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-true	true	true
-PREHOOK: query: select
-  regexp_extract(c2, 'val_([0-9]+)', 1),
-  regexp_extract(c4, 'val_([0-9]+)', 1),
-  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp_extract(c2, 'val_([0-9]+)', 1),
-  regexp_extract(c4, 'val_([0-9]+)', 1),
-  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-238	238	true
-PREHOOK: query: select
-  regexp_replace(c2, 'val', 'replaced'),
-  regexp_replace(c4, 'val', 'replaced'),
-  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp_replace(c2, 'val', 'replaced'),
-  regexp_replace(c4, 'val', 'replaced'),
-  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-replaced_238	replaced_238	true
-PREHOOK: query: select
-  reverse(c2),
-  reverse(c4),
-  reverse(c2) = reverse(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  reverse(c2),
-  reverse(c4),
-  reverse(c2) = reverse(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-832_lav	832_lav	true
-PREHOOK: query: select
-  rpad(c2, 15, ' '),
-  rpad(c4, 15, ' '),
-  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  rpad(c2, 15, ' '),
-  rpad(c4, 15, ' '),
-  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238        	val_238        	true
-PREHOOK: query: select
-  rtrim(c2),
-  rtrim(c4),
-  rtrim(c2) = rtrim(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  rtrim(c2),
-  rtrim(c4),
-  rtrim(c2) = rtrim(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  sentences('See spot run.  See jane run.'),
-  sentences(cast('See spot run.  See jane run.' as varchar(50)))
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  sentences('See spot run.  See jane run.'),
-  sentences(cast('See spot run.  See jane run.' as varchar(50)))
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-[["See","spot","run"],["See","jane","run"]]	[["See","spot","run"],["See","jane","run"]]
-PREHOOK: query: select
-  split(c2, '_'),
-  split(c4, '_')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  split(c2, '_'),
-  split(c4, '_')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-["val","238"]	["val","238"]
-PREHOOK: query: select 
-  str_to_map('a:1,b:2,c:3',',',':'),
-  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select 
-  str_to_map('a:1,b:2,c:3',',',':'),
-  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-{"a":"1","b":"2","c":"3"}	{"a":"1","b":"2","c":"3"}
-PREHOOK: query: select
-  substr(c2, 1, 3),
-  substr(c4, 1, 3),
-  substr(c2, 1, 3) = substr(c4, 1, 3)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  substr(c2, 1, 3),
-  substr(c4, 1, 3),
-  substr(c2, 1, 3) = substr(c4, 1, 3)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val	val	true
-PREHOOK: query: select
-  trim(c2),
-  trim(c4),
-  trim(c2) = trim(c4)
-from varchar_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  trim(c2),
-  trim(c4),
-  trim(c2) = trim(c4)
-from varchar_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: -- Aggregate Functions
-select
-  compute_stats(c2, 16),
-  compute_stats(c4, 16)
-from varchar_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Aggregate Functions
-select
-  compute_stats(c2, 16),
-  compute_stats(c4, 16)
-from varchar_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1}	{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1}
-PREHOOK: query: select
-  min(c2),
-  min(c4)
-from varchar_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  min(c2),
-  min(c4)
-from varchar_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238
-PREHOOK: query: select
-  max(c2),
-  max(c4)
-from varchar_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  max(c2),
-  max(c4)
-from varchar_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@varchar_udf_1
-#### A masked pattern was here ####
-val_238	val_238
-PREHOOK: query: drop table varchar_udf_1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@varchar_udf_1
-PREHOOK: Output: default@varchar_udf_1
-POSTHOOK: query: drop table varchar_udf_1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@varchar_udf_1
-POSTHOOK: Output: default@varchar_udf_1

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/varchar_udf1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/varchar_udf1.q.out b/ql/src/test/results/clientpositive/varchar_udf1.q.out
new file mode 100644
index 0000000..e5cfce5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/varchar_udf1.q.out
@@ -0,0 +1,453 @@
+PREHOOK: query: drop table varchar_udf_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table varchar_udf_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@varchar_udf_1
+PREHOOK: query: insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@varchar_udf_1
+POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- UDFs with varchar support
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- UDFs with varchar support
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238val_238	238val_238	true
+PREHOOK: query: select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+VAL_238	VAL_238	true
+PREHOOK: query: select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: -- Scalar UDFs
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Scalar UDFs
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+118	118	true
+PREHOOK: query: select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238|val_238	238|val_238	true
+PREHOOK: query: select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+4	4	true
+PREHOOK: query: select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+7	7	true
+PREHOOK: query: select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+5	5	true
+PREHOOK: query: select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+        val_238	        val_238	true
+PREHOOK: query: select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+true	true	true
+PREHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238	238	true
+PREHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+replaced_238	replaced_238	true
+PREHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+832_lav	832_lav	true
+PREHOOK: query: select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238        	val_238        	true
+PREHOOK: query: select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+[["See","spot","run"],["See","jane","run"]]	[["See","spot","run"],["See","jane","run"]]
+PREHOOK: query: select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+["val","238"]	["val","238"]
+PREHOOK: query: select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+{"a":"1","b":"2","c":"3"}	{"a":"1","b":"2","c":"3"}
+PREHOOK: query: select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val	val	true
+PREHOOK: query: select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: -- Aggregate Functions
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Aggregate Functions
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}	{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}
+PREHOOK: query: select
+  min(c2),
+  min(c4)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  min(c2),
+  min(c4)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238
+PREHOOK: query: select
+  max(c2),
+  max(c4)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  max(c2),
+  max(c4)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238
+PREHOOK: query: drop table varchar_udf_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@varchar_udf_1
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: drop table varchar_udf_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@varchar_udf_1
+POSTHOOK: Output: default@varchar_udf_1

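The varchar_udf1 hunks above all trace back to one cause: the per-JDK golden files existed only because a few UDF outputs depend on java.util.HashMap iteration order, which changed between JDK 7 and JDK 8. Compare the str_to_map results in the two deleted variants -- {"b":"2","a":"1","c":"3"} in one, {"a":"1","b":"2","c":"3"} in the other -- against the single ordering kept in the new varchar_udf1.q.out; with the java8 branch no longer testing on JDK 7, one golden file suffices and the -- JAVA_VERSION_SPECIFIC_OUTPUT markers disappear. A minimal Java sketch of the ordering issue (illustration only, not Hive's actual UDF code; the parsing is a simplified stand-in for str_to_map):

  import java.util.HashMap;
  import java.util.Map;

  public class StrToMapOrder {
      public static void main(String[] args) {
          // Parse "a:1,b:2,c:3" into a map: split on ',' for entries,
          // then on ':' for key/value, as str_to_map does conceptually.
          Map<String, String> m = new HashMap<>();
          for (String entry : "a:1,b:2,c:3".split(",")) {
              String[] kv = entry.split(":", 2);
              m.put(kv[0], kv[1]);
          }
          // HashMap iteration order is unspecified and differs across JDKs
          // (JDK 7 and JDK 8 use different supplemental hash functions), so
          // the same logical map can print in a different key order per JVM.
          System.out.println(m); // e.g. {b=2, a=1, c=3} on one JDK, {a=1, b=2, c=3} on another
      }
  }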
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
deleted file mode 100644
index 867dd4c..0000000
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.7.out
+++ /dev/null
@@ -1,220 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: over1korc
-            Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: i (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: avg(50), avg(50.0), avg(50)
-                keys: _col0 (type: int)
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
-          keys: KEY._col0 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: int)
-              sort order: +
-              Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-              TopN Hash Memory Usage: 0.1
-              value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-65536	50.0	50.0	50.0000
-65537	50.0	50.0	50.0000
-65538	50.0	50.0	50.0000
-65539	50.0	50.0	50.0000
-65540	50.0	50.0	50.0000
-65541	50.0	50.0	50.0000
-65542	50.0	50.0	50.0000
-65543	50.0	50.0	50.0000
-65544	50.0	50.0	50.0000
-65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.8.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.8.out
deleted file mode 100644
index 789e6c2..0000000
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.java1.8.out
+++ /dev/null
@@ -1,197 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: over1korc
-            Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: i (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: avg(50), avg(50.0), avg(50)
-                keys: _col0 (type: int)
-                mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
-          keys: KEY._col0 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50

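A note on the deletion above: with GROUP BY i and LIMIT 10 but no ORDER BY, which ten groups a run emits depends on hash iteration order, so the golden output was sensitive to the JVM version. A minimal HiveQL sketch of the distinction (hypothetical table t with column k, not part of this test suite):

  SELECT k FROM t GROUP BY k LIMIT 10;             -- surviving groups depend on hash order
  SELECT k FROM t GROUP BY k ORDER BY k LIMIT 10;  -- deterministic first ten keys

The vector_cast_constant.q.out hunk below fixes this at the source by adding ORDER BY i to the query.
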
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
index 39ed1c8..6033aad 100644
--- a/ql/src/test/results/clientpositive/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/vector_cast_constant.q.out
@@ -102,18 +102,19 @@ PREHOOK: query: EXPLAIN SELECT
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT 
   i,
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
 
 STAGE PLANS:
   Stage: Stage-1
@@ -146,6 +147,28 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3
           Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: int)
+              sort order: +
+              Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+              TopN Hash Memory Usage: 0.1
+              value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
           Limit
             Number of rows: 10
             Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
@@ -168,7 +191,7 @@ PREHOOK: query: SELECT
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1korc
 #### A masked pattern was here ####
@@ -177,17 +200,17 @@ POSTHOOK: query: SELECT
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000

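Two things change in the plan above once ORDER BY i is added. First, the global sort needs its own MapReduce job, so Stage-2 appears between the group-by stage and the fetch, with TopN Hash Memory Usage applying the LIMIT during the shuffle. Second, the decimal average now surfaces with its derived type: CAST(50 AS DECIMAL) is DECIMAL(10,0) by default, and AVG over it produces DECIMAL(14,4) (see the _col3 value expression), which is why the last column prints as 50.0000 rather than 50. A one-line HiveQL sketch of the widening (any populated table works; src is illustrative):

  SELECT AVG(CAST(50 AS DECIMAL)) FROM src;  -- result type DECIMAL(14,4), prints 50.0000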

[30/66] [abbrv] hive git commit: HIVE-13332 : support dumping all row indexes in ORC FileDump (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/ql/src/test/results/clientpositive/tez/orc_merge12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_merge12.q.out b/ql/src/test/results/clientpositive/tez/orc_merge12.q.out
index f23be5a..6a86fcf 100644
--- a/ql/src/test/results/clientpositive/tez/orc_merge12.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_merge12.q.out
@@ -429,9 +429,117 @@ Stripes:
     Encoding column 34: DIRECT_V2
     Encoding column 35: DIRECT
     Encoding column 36: DIRECT
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 2288 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
       Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 2:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 3:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 4:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 5:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 6:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 7:
+      Entry 0: count: 10000 hasNull: false min: 00020767-dd8f-4f4d-bd68-4b7be64b8e44 max: fffa3516-e219-4027-b0d3-72bb2e676c52 sum: 360000 positions: 0,0,0,0,0
+      Entry 1: count: 2288 hasNull: false min: 002d8ccb-a094-4d10-b283-999770cf8488 max: ffacef94-41da-4230-807a-509bbf50b057 sum: 82368 positions: 153190,97856,0,9766,272
+    Row group indices for column 8:
+      Entry 0: count: 10000 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 720000 positions: 0,0,0,0,0
+      Entry 1: count: 2288 hasNull: false min: 00124556-8383-44c4-a28b-7a413de74ccc4137606f-2cf7-43fb-beff-b6d374fd15ec max: ffde3bce-bb56-4fa9-81d7-146ca2eab946225c18e0-0002-4d07-9853-12c92c0f5637 sum: 164736 positions: 306445,195712,0,9766,272
+    Row group indices for column 9:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 10:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 11:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 12:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
+    Row group indices for column 13:
+      Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 14:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 15:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 16:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 17:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 18:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 19:
+      Entry 0: count: 7140 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yxN0212hM17E8J8bJj8D7b sum: 99028 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: 006bb3K max: yy2GiGM sum: 28853 positions: 0,126,98,0,0,14308,0
+    Row group indices for column 20:
+      Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
+    Row group indices for column 21:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 22:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 23:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 24:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
+    Row group indices for column 25:
+      Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 26:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 27:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 28:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 29:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 30:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 31:
+      Entry 0: count: 7140 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yxN0212hM17E8J8bJj8D7b sum: 99028 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: 006bb3K max: yy2GiGM sum: 28853 positions: 0,126,98,0,0,14308,0
+    Row group indices for column 32:
+      Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
+    Row group indices for column 33:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 34:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 35:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 36:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
   Stripe: offset: 1503357 data: 1500017 rows: 12288 tail: 501 index: 2836
     Stream: column 0 section ROW_INDEX start: 1503357 length 21
     Stream: column 1 section ROW_INDEX start: 1503378 length 53
@@ -593,9 +701,117 @@ Stripes:
     Encoding column 34: DIRECT_V2
     Encoding column 35: DIRECT
     Encoding column 36: DIRECT
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 2288 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
       Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 2:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 3:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 4:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 5:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 6:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 7:
+      Entry 0: count: 10000 hasNull: false min: 00020767-dd8f-4f4d-bd68-4b7be64b8e44 max: fffa3516-e219-4027-b0d3-72bb2e676c52 sum: 360000 positions: 0,0,0,0,0
+      Entry 1: count: 2288 hasNull: false min: 002d8ccb-a094-4d10-b283-999770cf8488 max: ffacef94-41da-4230-807a-509bbf50b057 sum: 82368 positions: 153190,97856,0,9766,272
+    Row group indices for column 8:
+      Entry 0: count: 10000 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 720000 positions: 0,0,0,0,0
+      Entry 1: count: 2288 hasNull: false min: 00124556-8383-44c4-a28b-7a413de74ccc4137606f-2cf7-43fb-beff-b6d374fd15ec max: ffde3bce-bb56-4fa9-81d7-146ca2eab946225c18e0-0002-4d07-9853-12c92c0f5637 sum: 164736 positions: 306445,195712,0,9766,272
+    Row group indices for column 9:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 10:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 11:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 12:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
+    Row group indices for column 13:
+      Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 14:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 15:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 16:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 17:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 18:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 19:
+      Entry 0: count: 7140 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yxN0212hM17E8J8bJj8D7b sum: 99028 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: 006bb3K max: yy2GiGM sum: 28853 positions: 0,126,98,0,0,14308,0
+    Row group indices for column 20:
+      Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
+    Row group indices for column 21:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 22:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 23:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 24:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
+    Row group indices for column 25:
+      Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 26:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 27:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 28:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 29:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 30:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 31:
+      Entry 0: count: 7140 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yxN0212hM17E8J8bJj8D7b sum: 99028 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: 006bb3K max: yy2GiGM sum: 28853 positions: 0,126,98,0,0,14308,0
+    Row group indices for column 32:
+      Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
+    Row group indices for column 33:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 34:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 35:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 36:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
 
 File length: 3007981 bytes
 Padding length: 0 bytes

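The new sections above are what HIVE-13332 adds: FileDump now prints row-group index entries for every column in each stripe (count, hasNull, min/max/sum where the type has them, and the stream positions used to seek within a row group), rather than only the columns a test happened to request. A sketch of the invocation that produces this style of dump (path illustrative; --rowindex takes a comma-separated list of column ids per the FileDump usage string, and the exact spelling may vary by version):

  hive --orcfiledump --rowindex=0,1,2,3 /path/to/file.orc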

[49/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
new file mode 100644
index 0000000..e53fee7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out
@@ -0,0 +1,1005 @@
+PREHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed columns. merge.
+-- The following explains the merge example used in this test case
+-- DML will generate 2 partitions
+-- ds=2008-04-08/hr=a1
+-- ds=2008-04-08/hr=b1
+-- without merge, each partition has more files
+-- ds=2008-04-08/hr=a1 has 2 files
+-- ds=2008-04-08/hr=b1 has 6 files
+-- with merge, each partition has fewer files
+-- ds=2008-04-08/hr=a1 has 1 file
+-- ds=2008-04-08/hr=b1 has 4 files
+-- The following shows file size and name in each directory
+-- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 155 000000_0
+-- 155 000001_0
+-- with merge
+-- 254 000000_0
+-- hr=b1/key=103/value=val_103:
+-- without merge
+-- 99 000000_0
+-- 99 000001_0
+-- with merge
+-- 142 000001_0
+-- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 5181 000000_0
+-- 5181 000001_0
+-- with merge
+-- 5181 000000_0
+-- 5181 000001_0
+-- hr=b1/key=484/value=val_484
+-- without merge
+-- 87 000000_0
+-- 87 000001_0
+-- with merge
+-- 118 000002_0 
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed columns. merge.
+-- The following explains the merge example used in this test case
+-- DML will generate 2 partitions
+-- ds=2008-04-08/hr=a1
+-- ds=2008-04-08/hr=b1
+-- without merge, each partition has more files
+-- ds=2008-04-08/hr=a1 has 2 files
+-- ds=2008-04-08/hr=b1 has 6 files
+-- with merge, each partition has fewer files
+-- ds=2008-04-08/hr=a1 has 1 file
+-- ds=2008-04-08/hr=b1 has 4 files
+-- The following shows file size and name in each directory
+-- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 155 000000_0
+-- 155 000001_0
+-- with merge
+-- 254 000000_0
+-- hr=b1/key=103/value=val_103:
+-- without merge
+-- 99 000000_0
+-- 99 000001_0
+-- with merge
+-- 142 000001_0
+-- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 5181 000000_0
+-- 5181 000001_0
+-- with merge
+-- 5181 000000_0
+-- 5181 000001_0
+-- hr=b1/key=484/value=val_484
+-- without merge
+-- 87 000000_0
+-- 87 000001_0
+-- with merge
+-- 118 000002_0 
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_dynamic_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketizing to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketizing to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_dynamic_part
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_dynamic_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_dynamic_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_dynamic_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+ds=2008-04-08/hr=a1
+ds=2008-04-08/hr=b1
+PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, a1]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	16                  
+	rawDataSize         	136                 
+	totalSize           	310                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, b1]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	6                   
+	numRows             	984                 
+	rawDataSize         	9488                
+	totalSize           	10734               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- list bucketing DML with merge. use bucketizing to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML with merge. use bucketizing to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_dynamic_part
+                      partition_columns hr
+                      partition_columns.types string
+                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_dynamic_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns hr
+                partition_columns.types string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_dynamic_part
+              partition_columns hr
+              partition_columns.types string
+              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns hr
+                partition_columns.types string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+            name: default.list_bucketing_dynamic_part
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-5
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_dynamic_part
+              partition_columns hr
+              partition_columns.types string
+              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns hr
+                partition_columns.types string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+            name: default.list_bucketing_dynamic_part
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_dynamic_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_dynamic_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+ds=2008-04-08/hr=a1
+ds=2008-04-08/hr=b1
+PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, a1]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	1                   
+	numRows             	16                  
+	rawDataSize         	136                 
+	totalSize           	254                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, b1]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	984                 
+	rawDataSize         	9488                
+	totalSize           	10622               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select count(*) from list_bucketing_dynamic_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from list_bucketing_dynamic_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+#### A masked pattern was here ####
+1000
+PREHOOK: query: explain extended
+select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr a1
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_dynamic_part
+              numFiles 1
+              numRows 16
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 136
+              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 254
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+            name: default.list_bucketing_dynamic_part
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr b1
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_dynamic_part
+              numFiles 4
+              numRows 984
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 9488
+              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 10622
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+            name: default.list_bucketing_dynamic_part
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_dynamic_part
+          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: '484' (type: string), 'val_484' (type: string), ds (type: string), hr (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+#### A masked pattern was here ####
+484	val_484	2008-04-08	b1
+484	val_484	2008-04-08	b1
+PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+484	val_484	2008-04-08	11
+484	val_484	2008-04-08	12
+PREHOOK: query: -- clean up
+drop table list_bucketing_dynamic_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- clean up
+drop table list_bucketing_dynamic_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Output: default@list_bucketing_dynamic_part

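For orientation, the plan above comes from the list-bucketing dynamic-partition DML that this test exercises. A minimal sketch of the setup, with the table DDL and INSERT taken from the test output itself (the two set commands are assumptions about the usual prerequisites, not part of this excerpt):

    -- skewed (key, value) combinations are written into their own subdirectories
    set hive.mapred.supports.subdirectories=true;
    set hive.exec.dynamic.partition.mode=nonstrict;  -- hr is resolved per row

    create table list_bucketing_dynamic_part (key string, value string)
        partitioned by (ds string, hr string)
        skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
        stored as DIRECTORIES
        STORED AS RCFILE;

    -- rows with key % 100 == 0 land in hr=a1, all others in hr=b1
    insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
    select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08';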
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
deleted file mode 100644
index de1305f..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
+++ /dev/null
@@ -1,641 +0,0 @@
-PREHOOK: query: -- list bucketing alter table ... concatenate: 
--- Use list bucketing DML to generate multiple files in partitions by turning off merge
--- dynamic partition. multiple skewed columns. merge.
--- The following explains merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 file
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 000000_0
--- 155 000001_0
--- with merge
--- 254 000000_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 000000_0
--- 99 000001_0
--- with merge
--- 142 000001_0
--- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 000000_0
--- 5181 000001_0
--- with merge
--- 5181 000000_0
--- 5181 000001_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 000000_0
--- 87 000001_0
--- with merge
--- 118 000002_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing alter table ... concatenate: 
--- Use list bucketing DML to generate multiple files in partitions by turning off merge
--- dynamic partition. multiple skewed columns. merge.
--- The following explains merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 file
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 000000_0
--- 155 000001_0
--- with merge
--- 254 000000_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 000000_0
--- 99 000001_0
--- with merge
--- 142 000001_0
--- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 000000_0
--- 5181 000001_0
--- with merge
--- 5181 000000_0
--- 5181 000001_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 000000_0
--- 87 000001_0
--- with merge
--- 118 000002_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_dynamic_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_dynamic_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-ds=2008-04-08/hr=a1
-ds=2008-04-08/hr=b1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, a1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	2                   
-	numRows             	16                  
-	rawDataSize         	136                 
-	totalSize           	310                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, b1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	6                   
-	numRows             	984                 
-	rawDataSize         	9488                
-	totalSize           	10734               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- concatenate the partition and it will merge files
-alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate
-PREHOOK: type: ALTER_PARTITION_MERGE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-POSTHOOK: query: -- concatenate the partition and it will merge files
-alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate
-POSTHOOK: type: ALTER_PARTITION_MERGE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, b1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	numFiles            	3                   
-	numRows             	984                 
-	rawDataSize         	9488                
-	totalSize           	10586               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_dynamic_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_dynamic_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr a1
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 2
-              numRows 16
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 136
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 310
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr b1
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 3
-              numRows 984
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9488
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10586
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_dynamic_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-484	val_484	2008-04-08	b1
-484	val_484	2008-04-08	b1
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' order by hr
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' order by hr
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- clean up
-drop table list_bucketing_dynamic_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_dynamic_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part

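The deletion above drops the Java-7-specific golden file for list_bucket_dml_8. The behavior it pinned down is the in-place merge step quoted below from the same test (a sketch of the workflow only; the resulting numFiles/totalSize depend on the input data):

    -- merge the small files inside one list-bucketed partition in place
    alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate;

    -- the partition parameters (numFiles, totalSize) shrink after the merge
    desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1');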

[03/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/tez_self_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_self_join.q.out b/ql/src/test/results/clientpositive/tez/tez_self_join.q.out
index f5375b2..52404a0 100644
--- a/ql/src/test/results/clientpositive/tez/tez_self_join.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_self_join.q.out
@@ -42,6 +42,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__2
 POSTHOOK: Output: default@tez_self_join2
 POSTHOOK: Lineage: tez_self_join2.id1 EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 select s.id2, s.id3
 from  
@@ -90,9 +91,7 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'ab' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: 'ab' (type: string)
+                        sort order: 
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: int), _col2 (type: string)
         Map 4 
@@ -106,9 +105,7 @@ STAGE PLANS:
                     Select Operator
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'ab' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: 'ab' (type: string)
+                        sort order: 
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
         Map 5 
             Map Operator Tree:
@@ -133,16 +130,20 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 'ab' (type: string)
-                  1 'ab' (type: string)
+                  0 
+                  1 
                 outputColumnNames: _col0, _col2
                 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
+                Select Operator
+                  expressions: _col0 (type: int), _col2 (type: string)
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: string)
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: int)
+                    Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: string)
         Reducer 3 
             Reduce Operator Tree:
               Merge Join Operator
@@ -151,10 +152,10 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-                outputColumnNames: _col2
+                outputColumnNames: _col1
                 Statistics: Num rows: 3 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: 'ab' (type: string), _col2 (type: string)
+                  expressions: 'ab' (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 3 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -171,6 +172,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select s.id2, s.id3
 from  
 (

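The tez_self_join hunks show the user-visible effect of HIVE-13068 here: when the only equi-join key on both sides folds to the same literal ('ab'), the key carries no information, so the join is now planned with an empty key and flagged as a cross product. A hypothetical reduction of the pattern (t1, t2, and the column names are illustrative, not the test's actual query):

    -- both sides are pinned to the literal 'ab', so the equality is vacuous
    -- and the shuffle join degenerates into a (warned) cross product
    select t1.id, t2.id
    from t1 join t2 on t1.s = t2.s
    where t1.s = 'ab' and t2.s = 'ab';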
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/tez_union_dynamic_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_union_dynamic_partition.q.out b/ql/src/test/results/clientpositive/tez/tez_union_dynamic_partition.q.out
index abb0707..a1221a4 100644
--- a/ql/src/test/results/clientpositive/tez/tez_union_dynamic_partition.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_union_dynamic_partition.q.out
@@ -66,42 +66,34 @@ STAGE PLANS:
                   alias: dummy
                   Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: 1 (type: int)
-                    outputColumnNames: _col0
+                    expressions: 1 (type: int), '2014' (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: _col0 (type: int), '2014' (type: string)
-                      outputColumnNames: _col0, _col1
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.partunion1
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.partunion1
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: dummy
                   Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: 2 (type: int)
-                    outputColumnNames: _col0
+                    expressions: 2 (type: int), '2014' (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: _col0 (type: int), '2014' (type: string)
-                      outputColumnNames: _col0, _col1
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                      File Output Operator
-                        compressed: false
-                        Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
-                        table:
-                            input format: org.apache.hadoop.mapred.TextInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                            name: default.partunion1
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.partunion1
         Union 2 
             Vertex: Union 2
 

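In tez_union_dynamic_partition the change folds the constant partition value into the first projection of each union branch, removing the redundant intermediate Select. A sketch of the query shape this plan corresponds to (the dummy and partunion1 names appear in the plan; the column alias and exact INSERT syntax are assumptions):

    insert overwrite table partunion1 partition (part1)
    select 1 as id, '2014' as part1 from dummy
    union all
    select 2 as id, '2014' as part1 from dummy;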
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/tez_vector_dynpart_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_vector_dynpart_hashjoin_1.q.out b/ql/src/test/results/clientpositive/tez/tez_vector_dynpart_hashjoin_1.q.out
index 47699c6..776e02e 100644
--- a/ql/src/test/results/clientpositive/tez/tez_vector_dynpart_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_vector_dynpart_hashjoin_1.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -178,7 +178,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -301,7 +301,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -448,7 +448,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -580,7 +580,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -710,7 +710,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out b/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out
index 578205e..0cc0b9c 100644
--- a/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out
+++ b/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out
@@ -55,7 +55,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -83,7 +83,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -99,13 +99,13 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
 (select * from (select * from small_alltypesorc1a) sq1
  union all
@@ -388,7 +388,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -416,7 +416,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -432,13 +432,13 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
 (select * from (select * from small_alltypesorc1a) sq1
  union all

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out b/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out
index d548364..8a9978b 100644
--- a/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out
@@ -70,7 +70,7 @@ POSTHOOK: Output: default@TINT
 POSTHOOK: Lineage: tint.cint SIMPLE [(tint_txt)tint_txt.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: tint.rnum SIMPLE [(tint_txt)tint_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 tint_txt.rnum	tint_txt.cint
-Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: explain
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint from tint , tsint where tint.cint between tsint.csint and tsint.csint
 PREHOOK: type: QUERY
@@ -144,7 +144,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: select tint.rnum, tsint.rnum, tint.cint, tsint.csint from tint , tsint where tint.cint between tsint.csint and tsint.csint
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tint

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
index 2bf93e3..8cbb4b1 100644
--- a/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
@@ -194,7 +194,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(*))
 FROM hundredorc t1 JOIN hundredorc t2 ON t2.bin = t2.bin
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out b/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
index bb67008..e705e60 100644
--- a/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_coalesce.q.out
@@ -48,7 +48,7 @@ STAGE PLANS:
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: null (type: double), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: smallint), KEY.reducesinkkey5 (type: string)
+                expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: smallint), KEY.reducesinkkey5 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                 Limit
@@ -140,7 +140,7 @@ STAGE PLANS:
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: null (type: tinyint), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: double)
+                expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                 Limit
@@ -230,7 +230,7 @@ STAGE PLANS:
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: null (type: float), null (type: bigint), 0.0 (type: float)
+                expressions: KEY.reducesinkkey0 (type: float), KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey2 (type: float)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
                 Limit
@@ -412,7 +412,7 @@ STAGE PLANS:
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: null (type: float), null (type: bigint), null (type: float)
+                expressions: KEY.reducesinkkey0 (type: float), KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey0 (type: float)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
                 Limit
@@ -481,7 +481,7 @@ STAGE PLANS:
           Filter Operator
             predicate: cbigint is null (type: boolean)
             Select Operator
-              expressions: null (type: bigint), ctinyint (type: tinyint), COALESCE(null,ctinyint) (type: bigint)
+              expressions: null (type: bigint), ctinyint (type: tinyint), COALESCE(null,ctinyint) (type: tinyint)
               outputColumnNames: _col0, _col1, _col2
               Limit
                 Number of rows: 10

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_date_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_date_1.q.out b/ql/src/test/results/clientpositive/tez/vector_date_1.q.out
index a27edcb..9f31820 100644
--- a/ql/src/test/results/clientpositive/tez/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_date_1.q.out
@@ -665,7 +665,7 @@ STAGE PLANS:
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 2001-01-01 (type: date), VALUE._col0 (type: date)
+                expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
index 701f0b7..292fb88 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out
@@ -1048,17 +1048,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 3.14 (type: decimal(4,2))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 3.14 (type: decimal(3,2))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(4,2))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 3.14 (type: decimal(4,2))
+                expressions: VALUE._col0 (type: decimal(4,2))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1108,17 +1111,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 3.14 (type: decimal(4,2))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 3.14 (type: decimal(3,2))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(4,2))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 3.14 (type: decimal(4,2))
+                expressions: VALUE._col0 (type: decimal(4,2))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1168,17 +1174,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 1355944339.1234567 (type: decimal(30,8))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 1355944339.1234567 (type: decimal(17,7))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(30,8))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 1355944339.1234567 (type: decimal(30,8))
+                expressions: VALUE._col0 (type: decimal(30,8))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1228,17 +1237,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 1 (type: decimal(10,0))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 1 (type: int)
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 1 (type: decimal(10,0))
+                expressions: VALUE._col0 (type: decimal(10,0))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1279,17 +1291,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 1 (type: decimal(10,0))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 1 (type: int)
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 1 (type: decimal(10,0))
+                expressions: VALUE._col0 (type: decimal(10,0))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1339,17 +1354,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 3 (type: decimal(10,0))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 3 (type: int)
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 3 (type: decimal(10,0))
+                expressions: VALUE._col0 (type: decimal(10,0))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1399,17 +1417,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 3 (type: decimal(10,0))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 3 (type: int)
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 3 (type: decimal(10,0))
+                expressions: VALUE._col0 (type: decimal(10,0))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1459,17 +1480,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 3 (type: decimal(10,0))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 3 (type: int)
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 3 (type: decimal(10,0))
+                expressions: VALUE._col0 (type: decimal(10,0))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1519,17 +1543,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 3 (type: decimal(10,0))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 3 (type: int)
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(10,0))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 3 (type: decimal(10,0))
+                expressions: VALUE._col0 (type: decimal(10,0))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1579,17 +1606,20 @@ STAGE PLANS:
                   alias: decimal_2
                   Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: 1 (type: decimal(20,19))
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 1 (type: int)
                       sort order: +
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                      value expressions: _col0 (type: decimal(20,19))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 1 (type: decimal(20,19))
+                expressions: VALUE._col0 (type: decimal(20,19))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
@@ -1649,7 +1679,7 @@ STAGE PLANS:
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 0.99999999999999999999 (type: decimal(20,20))
+                expressions: KEY.reducesinkkey0 (type: decimal(20,20))
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
index a7b8385..3b6d42d 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_round_2.q.out
@@ -461,20 +461,20 @@ STAGE PLANS:
                   alias: decimal_tbl_4_orc
                   Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9))
-                    outputColumnNames: _col0, _col1
+                    expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9)), 1809242.315111134 (type: decimal(17,9)), -1809242.315111134 (type: decimal(17,9))
+                    outputColumnNames: _col0, _col1, _col2, _col3
                     Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: decimal(30,9))
                       sort order: +
                       Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: decimal(30,9))
+                      value expressions: _col1 (type: decimal(30,9)), _col2 (type: decimal(17,9)), _col3 (type: decimal(17,9))
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: decimal(30,9)), VALUE._col0 (type: decimal(30,9)), 1809242.315111134 (type: decimal(17,9)), -1809242.315111134 (type: decimal(17,9))
+                expressions: KEY.reducesinkkey0 (type: decimal(30,9)), VALUE._col0 (type: decimal(30,9)), VALUE._col1 (type: decimal(17,9)), VALUE._col2 (type: decimal(17,9))
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_groupby_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_groupby_mapjoin.q.out
index d406f2b..d69012e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_groupby_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_groupby_mapjoin.q.out
@@ -1,4 +1,4 @@
-Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: -- HIVE-12738 -- We are checking if a MapJoin after a GroupBy will work properly.
 explain
 select *
@@ -27,39 +27,39 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 2 vectorized
-      File Output Operator [FS_34]
-        Select Operator [SEL_33] (rows=302 width=10)
+      File Output Operator [FS_33]
+        Select Operator [SEL_32] (rows=302 width=10)
           Output:["_col0","_col1"]
         <-Map 1 [SIMPLE_EDGE]
-          SHUFFLE [RS_22]
-            Select Operator [SEL_21] (rows=302 width=10)
+          SHUFFLE [RS_21]
+            Select Operator [SEL_20] (rows=302 width=10)
               Output:["_col0","_col1"]
-              Filter Operator [FIL_20] (rows=302 width=10)
+              Filter Operator [FIL_19] (rows=302 width=10)
                 predicate:_col3 is null
-                Map Join Operator [MAPJOIN_29] (rows=605 width=10)
-                  Conds:MAPJOIN_28._col0=RS_18._col0(Left Outer),HybridGraceHashJoin:true,Output:["_col0","_col1","_col3"]
+                Map Join Operator [MAPJOIN_28] (rows=605 width=10)
+                  Conds:MAPJOIN_27._col0=RS_17._col0(Left Outer),HybridGraceHashJoin:true,Output:["_col0","_col1","_col3"]
                 <-Map 5 [BROADCAST_EDGE]
-                  BROADCAST [RS_18]
+                  BROADCAST [RS_17]
                     PartitionCols:_col0
                     Select Operator [SEL_12] (rows=500 width=10)
                       Output:["_col0"]
                       TableScan [TS_11] (rows=500 width=10)
                         default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
-                <-Map Join Operator [MAPJOIN_28] (rows=550 width=10)
+                <-Map Join Operator [MAPJOIN_27] (rows=550 width=10)
                     Conds:(Inner),Output:["_col0","_col1"]
                   <-Reducer 4 [BROADCAST_EDGE] vectorized
-                    BROADCAST [RS_15]
+                    BROADCAST [RS_14]
                       Select Operator [SEL_10] (rows=1 width=8)
                         Filter Operator [FIL_9] (rows=1 width=8)
                           predicate:(_col0 = 0)
-                          Group By Operator [GBY_32] (rows=1 width=8)
+                          Group By Operator [GBY_31] (rows=1 width=8)
                             Output:["_col0"],aggregations:["count(VALUE._col0)"]
                           <-Map 3 [SIMPLE_EDGE]
                             SHUFFLE [RS_6]
                               Group By Operator [GBY_5] (rows=1 width=8)
                                 Output:["_col0"],aggregations:["count()"]
                                 Select Operator [SEL_4] (rows=250 width=10)
-                                  Filter Operator [FIL_26] (rows=250 width=10)
+                                  Filter Operator [FIL_25] (rows=250 width=10)
                                     predicate:key is null
                                     TableScan [TS_2] (rows=500 width=10)
                                       default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
@@ -68,7 +68,7 @@ Stage-0
                       TableScan [TS_0] (rows=500 width=10)
                         default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
 
-Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: select *
 from src
 where not key in

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_interval_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_interval_1.q.out b/ql/src/test/results/clientpositive/tez/vector_interval_1.q.out
index dbfa842..dbf2a5a 100644
--- a/ql/src/test/results/clientpositive/tez/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_interval_1.q.out
@@ -162,20 +162,20 @@ STAGE PLANS:
                   alias: vector_interval_1
                   Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: dt (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month)
-                    outputColumnNames: _col0, _col2, _col3, _col5, _col6
+                    expressions: dt (type: date), 2-4 (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), 0-0 (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month)
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                     Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: date)
                       sort order: +
                       Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col5 (type: interval_year_month), _col6 (type: interval_year_month)
+                      value expressions: _col1 (type: interval_year_month), _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col4 (type: interval_year_month), _col5 (type: interval_year_month), _col6 (type: interval_year_month)
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: date), 2-4 (type: interval_year_month), VALUE._col1 (type: interval_year_month), VALUE._col2 (type: interval_year_month), 0-0 (type: interval_year_month), VALUE._col4 (type: interval_year_month), VALUE._col5 (type: interval_year_month)
+                expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month), VALUE._col4 (type: interval_year_month), VALUE._col5 (type: interval_year_month)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out
index 8409a01..064e319 100644
--- a/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_interval_arithmetic.q.out
@@ -510,18 +510,21 @@ STAGE PLANS:
                   alias: interval_arithmetic_1
                   Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
+                    expressions: -1-1 (type: interval_year_month)
+                    outputColumnNames: _col1
                     Statistics: Num rows: 50 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                     Reduce Output Operator
                       key expressions: 5-5 (type: interval_year_month)
                       sort order: +
                       Statistics: Num rows: 50 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                       TopN Hash Memory Usage: 0.1
+                      value expressions: _col1 (type: interval_year_month)
             Execution mode: vectorized
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Select Operator
-                expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
+                expressions: KEY.reducesinkkey0 (type: interval_year_month), VALUE._col0 (type: interval_year_month)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 50 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 Limit

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out b/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out
index d50e079..a010722 100644
--- a/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out
@@ -30,7 +30,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
 POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
-Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out b/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out
index 97b3242..95b35b6 100644
--- a/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out
@@ -30,7 +30,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
 POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -40,7 +40,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 13630578
-Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
index 2864a48..52c520a 100644
--- a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
@@ -209,7 +209,7 @@ STAGE PLANS:
                              Left Semi Join 0 to 1
                         keys:
                           0 _col0 (type: int), 1 (type: int)
-                          1 _col0 (type: int), 1 (type: int)
+                          1 _col0 (type: int), _col1 (type: int)
                         outputColumnNames: _col1, _col2
                         input vertices:
                           1 Map 2
@@ -230,18 +230,18 @@ STAGE PLANS:
                     predicate: ((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: l_orderkey (type: int)
-                      outputColumnNames: _col0
+                      expressions: l_orderkey (type: int), 1 (type: int)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int), 1 (type: int)
+                        keys: _col0 (type: int), _col1 (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: int), 1 (type: int)
+                          key expressions: _col0 (type: int), _col1 (type: int)
                           sort order: ++
-                          Map-reduce partition columns: _col0 (type: int), 1 (type: int)
+                          Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                           Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
         Map 3 
             Map Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out b/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out
index a4ccef2..07c3d60 100644
--- a/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_null_projection.q.out
@@ -120,9 +120,9 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                         Reduce Output Operator
-                          key expressions: null (type: void)
+                          key expressions: _col0 (type: void)
                           sort order: +
-                          Map-reduce partition columns: null (type: void)
+                          Map-reduce partition columns: _col0 (type: void)
                           Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
         Map 4 
             Map Operator Tree:
@@ -139,14 +139,14 @@ STAGE PLANS:
                         outputColumnNames: _col0
                         Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                         Reduce Output Operator
-                          key expressions: null (type: void)
+                          key expressions: _col0 (type: void)
                           sort order: +
-                          Map-reduce partition columns: null (type: void)
+                          Map-reduce partition columns: _col0 (type: void)
                           Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
         Reducer 3 
             Reduce Operator Tree:
               Group By Operator
-                keys: null (type: void)
+                keys: KEY._col0 (type: void)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out
index 946a558..e5459c8 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -65,7 +65,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -81,13 +81,13 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE []
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
index 1998344..2a8f1e0 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -54,7 +54,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
-POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
@@ -76,12 +76,12 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
-POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out
index f20163b..b343795 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -61,7 +61,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.
 POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
@@ -81,9 +81,9 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out
index 90a9efb..8ae89e8 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out
@@ -37,7 +37,7 @@ POSTHOOK: Lineage: small_alltypesorc2b.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -65,7 +65,7 @@ POSTHOOK: Lineage: small_alltypesorc3b.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -81,13 +81,13 @@ POSTHOOK: Lineage: small_alltypesorc4b.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE []
 PREHOOK: query: select * from small_alltypesorc1b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1b

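A note on the Lineage changes in these vector_outer_join files: the columns whose lineage collapses to SIMPLE [] are exactly the ones the CTAS predicates constrain to NULL, which is consistent with CBO now folding them to constant NULLs in the projection, leaving no base-column dependency for the lineage hook to record. The query below is the small_alltypesorc4b CTAS quoted verbatim in this diff (reflowed for readability); cint and ctinyint are the affected columns:

  create table small_alltypesorc4b as
  select * from alltypesorc
  where cint is null and ctinyint is null
  order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble,
           cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2
  limit 10;
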
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out
index c1c251f..cb6fda0 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out
@@ -569,7 +569,7 @@ STAGE PLANS:
                            Left Outer Join0 to 1
                       keys:
                         0 UDFToLong(_col1) (type: bigint)
-                        1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 4
@@ -591,9 +591,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (_col0 pmod UDFToLong(8)) (type: bigint)
+                      key expressions: (_col0 pmod 8) (type: bigint)
                       sort order: +
-                      Map-reduce partition columns: (_col0 pmod UDFToLong(8)) (type: bigint)
+                      Map-reduce partition columns: (_col0 pmod 8) (type: bigint)
                       Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Map 5 
@@ -1239,7 +1239,7 @@ STAGE PLANS:
                            Left Outer Join0 to 1
                       keys:
                         0 UDFToLong(_col1) (type: bigint)
-                        1 (_col0 pmod UDFToLong(8)) (type: bigint)
+                        1 (_col0 pmod 8) (type: bigint)
                       outputColumnNames: _col0
                       input vertices:
                         1 Map 4
@@ -1261,9 +1261,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (_col0 pmod UDFToLong(8)) (type: bigint)
+                      key expressions: (_col0 pmod 8) (type: bigint)
                       sort order: +
-                      Map-reduce partition columns: (_col0 pmod UDFToLong(8)) (type: bigint)
+                      Map-reduce partition columns: (_col0 pmod 8) (type: bigint)
                       Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Map 5 

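The key-expression change here, (_col0 pmod UDFToLong(8)) becoming (_col0 pmod 8), reflects the implicit cast around the integer literal being folded into a bigint constant during CBO rather than left for Hive's ConstantPropagate to clean up; the key value is unchanged. A minimal sketch of a join that produces this key shape, with hypothetical table and column names (the actual vector_outer_join5 queries are not shown in this hunk):

  -- Hypothetical names: t1.a is an INT column and t2.b is a BIGINT column.
  -- The t2-side join key is pmod(b, 8); since b is BIGINT, the literal 8 is
  -- promoted to a bigint constant, which the plan now prints as
  -- (_col0 pmod 8) instead of (_col0 pmod UDFToLong(8)).
  select t1.a
  from t1
  left outer join t2
    on t1.a = pmod(t2.b, 8);
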
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
index 70f8d1b..a686ace 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
@@ -2356,7 +2356,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1)) and cboolean2 is not null) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
+                    predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cstring1 (type: string), ctimestamp1 (type: timestamp), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
@@ -2690,7 +2690,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and (cboolean1 >= 1) and cstring1 is not null) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null)) and cboolean1 is not null) (type: boolean)
+                    predicate: ((((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null)) and cboolean1 is not null) (type: boolean)
                     Statistics: Num rows: 10239 Data size: 2201421 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)

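In these two vectorization_short_regress hunks only the order of conjuncts inside one OR branch changes (cboolean2 is not null and cstring1 is not null move earlier in their branches); the filters are logically identical. For reference, the first predicate corresponds to a WHERE clause of the following shape over alltypesorc, transcribed from the plan above (the select list is a stand-in; the actual test projects several columns):

  select count(*)
  from alltypesorc
  where cast(ctimestamp1 as double) <> 0.0
    and ( (    -257 <> ctinyint
           and cboolean2 is not null
           and cstring1 regexp '.*ss'
           and -3.0 < cast(ctimestamp1 as double))
       or cast(ctimestamp2 as double) = -5.0
       or (cast(ctimestamp1 as double) < 0.0 and cstring2 like '%b%')
       or cdouble = cast(cint as double)
       or (cboolean1 is null and cfloat < cast(cint as float)) );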

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
index a1be9b9d..e19bb9e 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out
@@ -362,11 +362,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
@@ -420,11 +419,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
@@ -478,11 +476,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE
@@ -536,11 +533,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
@@ -594,11 +590,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
@@ -652,11 +647,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
@@ -767,11 +761,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
@@ -880,11 +873,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
@@ -938,11 +930,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
@@ -996,11 +987,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
@@ -1054,11 +1044,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
@@ -1112,11 +1101,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
@@ -1170,11 +1158,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
@@ -1281,11 +1268,10 @@ STAGE PLANS:
               Group By Operator
                 keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
-                pruneGroupingSetId: true
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int)
                   Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE

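Throughout this file the third group-by key, the '0' grouping-set ID, is now carried through as a real output column (_col2) and re-used in the Reduce Output Operator, instead of being pruned (pruneGroupingSetId: true) and re-inlined as a literal. A sketch of the query shape that produces such keys, assuming the loc_orc table these annotate_stats tests conventionally build (state and locid come from the plan above; the exact test queries are not shown here):

  explain select state, locid, count(*)
  from loc_orc
  group by state, locid with cube;
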
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cbo_rp_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_join1.q.out
index f3982b8..01a367e 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_join1.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_join1.q.out
@@ -141,7 +141,7 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
-                predicate: ((_col1 = 40) and (_col0 = 40)) (type: boolean)
+                predicate: ((_col0 = _col1) and (_col1 = 40) and (_col0 = 40)) (type: boolean)
                 Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 

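The new conjunct (_col0 = _col1) is the join condition that Hive's ConstantPropagate used to prove redundant (it is implied by _col1 = 40 and _col0 = 40) and remove; with that pass disabled after CBO, the condition survives in the Filter Operator. It is semantically harmless. A minimal sketch of the condition shape, with hypothetical table names (only the shape of the predicate matters here):

  -- t1.key = t2.key together with t1.key = 40 implies t2.key = 40, so the
  -- equality itself is derivable from the two constant comparisons; it used
  -- to be dropped and is now kept.
  select count(*)
  from t1
  join t2 on t1.key = t2.key and t1.key = 40;
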
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
index 2d3f12b..9e0613f 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_lineage2.q.out
@@ -18,7 +18,7 @@ PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src1
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
+{"version":"1.0","engine":"mr","database":"default","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > 10.0) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
 146	val_146
 150	val_150
 213	val_213

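The only change in this lineage record is inside the predicate expression: (UDFToDouble(src1.key) > UDFToDouble(10)) becomes (UDFToDouble(src1.key) > 10.0), i.e. the cast around the integer literal is folded into a double constant during CBO. The query itself, verbatim from the record above (reflowed):

  select * from src1
  where key > 10 and value > 'val'
  order by key
  limit 5;
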
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
index d567ec8..f67f81b 100644
--- a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
+++ b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
@@ -24,9 +24,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src_null
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@all_nulls
-POSTHOOK: Lineage: all_nulls.a SIMPLE [(src_null)src_null.FieldSchema(name:a, type:bigint, comment:null), ]
-POSTHOOK: Lineage: all_nulls.b EXPRESSION [(src_null)src_null.FieldSchema(name:a, type:bigint, comment:null), ]
-POSTHOOK: Lineage: all_nulls.c EXPRESSION [(src_null)src_null.FieldSchema(name:a, type:bigint, comment:null), ]
+POSTHOOK: Lineage: all_nulls.a SIMPLE []
+POSTHOOK: Lineage: all_nulls.b SIMPLE []
+POSTHOOK: Lineage: all_nulls.c SIMPLE []
 PREHOOK: query: analyze table all_nulls compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@all_nulls

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
index c7a39f5..2aa8d77 100644
--- a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
+++ b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 explain extended
@@ -188,7 +188,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src

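The JOIN[8] -> JOIN[7] renumbering here (and the similar shifts in constant_prop_3, constprog2, constprog3, constprog_partitioner, and create_view below) only reflects the sequential operator IDs shifting when the extra ConstantPropagate rewrite no longer runs over the plan; the cross-product warning itself is unchanged and still correct, since the flagged query, quoted from the PREHOOK lines above (reflowed), joins with no ON condition:

  -- src and src1 are joined without a join condition, only a filter on one
  -- side, hence the cross-product warning.
  select * from
    (select a.key as ak, a.value as av, b.key as bk, b.value as bv
     from src a join src1 b
     where a.key = '429') c;
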
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/constant_prop_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constant_prop_3.q.out b/ql/src/test/results/clientpositive/constant_prop_3.q.out
index 3635913..fe382fc 100644
--- a/ql/src/test/results/clientpositive/constant_prop_3.q.out
+++ b/ql/src/test/results/clientpositive/constant_prop_3.q.out
@@ -88,7 +88,7 @@ POSTHOOK: query: analyze table supplier_hive compute statistics for columns
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@supplier_hive
 #### A masked pattern was here ####
-Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select
 	p_brand,
 	p_type,

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/constprog2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constprog2.q.out b/ql/src/test/results/clientpositive/constprog2.q.out
index 0d76fc8..8ea1444 100644
--- a/ql/src/test/results/clientpositive/constprog2.q.out
+++ b/ql/src/test/results/clientpositive/constprog2.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT src1.key, src1.key + 1, src2.value
        FROM srcbucket src1 join srcbucket src2 ON src1.key = src2.key AND src1.key = 86
@@ -90,11 +90,13 @@ STAGE PLANS:
               predicate: (UDFToDouble(key) = 86.0) (type: boolean)
               Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
               Select Operator
+                expressions: key (type: int)
+                outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 86 (type: int)
+                  key expressions: _col0 (type: int)
                   sort order: +
-                  Map-reduce partition columns: 86 (type: int)
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src1
@@ -103,13 +105,13 @@ STAGE PLANS:
               predicate: (UDFToDouble(key) = 86.0) (type: boolean)
               Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: value (type: string)
-                outputColumnNames: _col1
+                expressions: key (type: int), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 86 (type: int)
+                  key expressions: _col0 (type: int)
                   sort order: +
-                  Map-reduce partition columns: 86 (type: int)
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
@@ -119,10 +121,10 @@ STAGE PLANS:
           keys:
             0 _col0 (type: int)
             1 _col0 (type: int)
-          outputColumnNames: _col2
+          outputColumnNames: _col0, _col2
           Statistics: Num rows: 550 Data size: 5831 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: 86 (type: int), 87 (type: int), _col2 (type: string)
+            expressions: _col0 (type: int), (_col0 + 1) (type: int), _col2 (type: string)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 550 Data size: 5831 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

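Here the literals stop being baked into the physical plan: both shuffle keys revert from the constant 86 to _col0, and the final projection computes (_col0 + 1) instead of emitting the precomputed 87. Since the filter guarantees key = 86, the result set is identical; only the plan shape changes. The join condition quoted in the EXPLAIN above (the statement may continue beyond this hunk):

  SELECT src1.key, src1.key + 1, src2.value
  FROM srcbucket src1 JOIN srcbucket src2
    ON src1.key = src2.key AND src1.key = 86;
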
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/constprog3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constprog3.q.out b/ql/src/test/results/clientpositive/constprog3.q.out
index e01a733..3fd776e 100644
--- a/ql/src/test/results/clientpositive/constprog3.q.out
+++ b/ql/src/test/results/clientpositive/constprog3.q.out
@@ -14,7 +14,7 @@ POSTHOOK: query: create temporary table table3(id int, val int, val1 int)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table3
-Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
 select table1.id, table1.val, table1.val1
 from table1 inner join table3
@@ -49,15 +49,15 @@ STAGE PLANS:
                   value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
           TableScan
             alias: table3
-            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              Filter Operator
-                predicate: false (type: boolean)
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            Filter Operator
+              predicate: (id = 1) (type: boolean)
+              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/constprog_partitioner.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constprog_partitioner.q.out b/ql/src/test/results/clientpositive/constprog_partitioner.q.out
index 56874ae..6a3f2ce 100644
--- a/ql/src/test/results/clientpositive/constprog_partitioner.q.out
+++ b/ql/src/test/results/clientpositive/constprog_partitioner.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT src1.key, src1.key + 1, src2.value
        FROM srcbucket src1 join srcbucket src2 ON src1.key = src2.key AND src1.key = 100
@@ -110,25 +110,25 @@ STAGE PLANS:
               predicate: ((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null) (type: boolean)
               Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: l_orderkey (type: int)
-                outputColumnNames: _col0
+                expressions: l_orderkey (type: int), 1 (type: int)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: int), 1 (type: int)
+                  keys: _col0 (type: int), _col1 (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: int), 1 (type: int)
+                    key expressions: _col0 (type: int), _col1 (type: int)
                     sort order: ++
-                    Map-reduce partition columns: _col0 (type: int), 1 (type: int)
+                    Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                     Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Left Semi Join 0 to 1
           keys:
-            0 _col0 (type: int), _col3 (type: int)
+            0 _col0 (type: int), 1 (type: int)
             1 _col0 (type: int), _col1 (type: int)
           outputColumnNames: _col1, _col2
           Statistics: Num rows: 55 Data size: 6598 Basic stats: COMPLETE Column stats: NONE

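In this left semi join the constant key now survives in two forms: the build side projects it as a real column (_col1 in the Group By and Reduce Output Operators), while the probe side's key pair changes from (_col0, _col3) to (_col0, 1). Both sides still hash identically. One plausible shape of an IN-subquery that compiles to such a semi join, with lineitem column names taken from the predicates above (hypothetical; the exact test query is not shown in this hunk):

  -- The pinned l_linenumber = 1 equality is what surfaces as the constant 1
  -- in the semi-join key pair.
  select li.l_partkey, li.l_suppkey
  from lineitem li
  where li.l_linenumber = 1
    and li.l_orderkey in
        (select l_orderkey from lineitem
         where l_shipmode = 'AIR' and l_linenumber = 1);
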
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/constprog_semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constprog_semijoin.q.out b/ql/src/test/results/clientpositive/constprog_semijoin.q.out
index 35d062d..1940987 100644
--- a/ql/src/test/results/clientpositive/constprog_semijoin.q.out
+++ b/ql/src/test/results/clientpositive/constprog_semijoin.q.out
@@ -421,7 +421,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((dimid = 100) = true) and (dimid <> 100) and (dimid = 100) is not null) (type: boolean)
+              predicate: (((dimid = 100) = true) and (dimid <> 100)) (type: boolean)
               Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -437,10 +437,10 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((id = 100) = true) and (id <> 100) and (id = 100) is not null) (type: boolean)
+              predicate: (((id = 100) = true) and (id <> 100)) (type: boolean)
               Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: id (type: int), (id = 100) (type: boolean)
+                expressions: id (type: int), true (type: boolean)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -502,7 +502,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((dimid) IN (100, 200) and ((dimid = 100) = true) and (dimid = 100) is not null) (type: boolean)
+              predicate: ((dimid) IN (100, 200) and ((dimid = 100) = true)) (type: boolean)
               Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -518,10 +518,10 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((id) IN (100, 200) and ((id = 100) = true) and (id = 100) is not null) (type: boolean)
+              predicate: ((id) IN (100, 200) and ((id = 100) = true)) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: id (type: int), (id = 100) (type: boolean)
+                expressions: id (type: int), true (type: boolean)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -585,7 +585,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((dimid = 100) = true) and (dimid = 200) and (dimid = 100) is not null) (type: boolean)
+              predicate: (((dimid = 100) = true) and (dimid = 200)) (type: boolean)
               Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string)
@@ -601,26 +601,28 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((id = 100) = true) and (id = 200) and (id = 100) is not null) (type: boolean)
+              predicate: (((id = 100) = true) and (id = 200)) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
+                expressions: 200 (type: int), true (type: boolean)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  keys: 200 (type: int), false (type: boolean)
+                  keys: _col0 (type: int), _col1 (type: boolean)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: 200 (type: int), false (type: boolean)
+                    key expressions: _col0 (type: int), _col1 (type: boolean)
                     sort order: ++
-                    Map-reduce partition columns: 200 (type: int), false (type: boolean)
+                    Map-reduce partition columns: _col0 (type: int), _col1 (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Left Semi Join 0 to 1
           keys:
-            0 _col3 (type: int), true (type: boolean)
+            0 200 (type: int), true (type: boolean)
             1 _col0 (type: int), _col1 (type: boolean)
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 2 Data size: 44 Basic stats: COMPLETE Column stats: NONE
@@ -664,7 +666,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((dimid = 100) = true) and (dimid = 100) and (dimid = 100) is not null) (type: boolean)
+              predicate: (((dimid = 100) = true) and (dimid = 100)) (type: boolean)
               Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string)
@@ -680,26 +682,28 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((id = 100) = true) and (id = 100) and (id = 100) is not null) (type: boolean)
+              predicate: (((id = 100) = true) and (id = 100)) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
+                expressions: 100 (type: int), true (type: boolean)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  keys: 100 (type: int), true (type: boolean)
+                  keys: _col0 (type: int), _col1 (type: boolean)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: 100 (type: int), true (type: boolean)
+                    key expressions: _col0 (type: int), _col1 (type: boolean)
                     sort order: ++
-                    Map-reduce partition columns: 100 (type: int), true (type: boolean)
+                    Map-reduce partition columns: _col0 (type: int), _col1 (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Left Semi Join 0 to 1
           keys:
-            0 _col3 (type: int), true (type: boolean)
+            0 100 (type: int), true (type: boolean)
             1 _col0 (type: int), _col1 (type: boolean)
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 2 Data size: 44 Basic stats: COMPLETE Column stats: NONE
@@ -745,7 +749,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((dimid = 100) = true) and dimid is not null and (dimid = 100) is not null) (type: boolean)
+              predicate: (((dimid = 100) = true) and dimid is not null) (type: boolean)
               Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -761,10 +765,10 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((id = 100) = true) and id is not null and (id = 100) is not null) (type: boolean)
+              predicate: (((id = 100) = true) and id is not null) (type: boolean)
               Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: id (type: int), (id = 100) (type: boolean)
+                expressions: id (type: int), true (type: boolean)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator

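Two simplifications recur throughout this file. First, conjuncts of the form (x = 100) is not null are dropped: under the companion condition ((x = 100) = true) the expression cannot be null, so the null check was always redundant. Second, in Select Operators guarded by such a filter, the projected expression (id = 100) is folded to the constant true. A minimal sketch of the second pattern, using the table and columns from the plan above:

  -- Under the filter, (id = 100) is known to be true, so the projection can
  -- emit the literal instead of re-evaluating the comparison.
  select id, (id = 100) as flag
  from table3
  where (id = 100) = true;
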
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cp_sel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cp_sel.q.out b/ql/src/test/results/clientpositive/cp_sel.q.out
index f42ec84..9ee9754 100644
--- a/ql/src/test/results/clientpositive/cp_sel.q.out
+++ b/ql/src/test/results/clientpositive/cp_sel.q.out
@@ -16,18 +16,18 @@ STAGE PLANS:
             alias: srcpart
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: string), value (type: string), 'hello' (type: string), 'world' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: 1 (type: int)
                 sort order: +
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                 TopN Hash Memory Usage: 0.1
-                value expressions: _col0 (type: string), _col1 (type: string)
+                value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), 'hello' (type: string), 'world' (type: string)
+          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
           Limit
@@ -87,18 +87,18 @@ STAGE PLANS:
             alias: srcpart
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: string), value (type: string), 'hello' (type: string), 'world' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col1 (type: string)
+                value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), 'hello' (type: string), 'world' (type: string)
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
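
In both cp_sel plans above, the constants 'hello' and 'world' are now materialized in the map-side Select as _col2/_col3 and shipped through the shuffle in the value expressions, rather than being re-attached in the reduce-side Select.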

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/create_genericudf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_genericudf.q.out b/ql/src/test/results/clientpositive/create_genericudf.q.out
index db3a9b5..b7771b2 100644
--- a/ql/src/test/results/clientpositive/create_genericudf.q.out
+++ b/ql/src/test/results/clientpositive/create_genericudf.q.out
@@ -52,9 +52,9 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
 POSTHOOK: Lineage: dest1.c1 SIMPLE []
 POSTHOOK: Lineage: dest1.c2 SIMPLE []
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
-POSTHOOK: Lineage: dest1.c4 EXPRESSION []
-POSTHOOK: Lineage: dest1.c5 EXPRESSION []
+POSTHOOK: Lineage: dest1.c3 SIMPLE []
+POSTHOOK: Lineage: dest1.c4 SIMPLE []
+POSTHOOK: Lineage: dest1.c5 SIMPLE []
 POSTHOOK: Lineage: dest1.c6 SIMPLE []
 POSTHOOK: Lineage: dest1.c7 SIMPLE []
 PREHOOK: query: SELECT dest1.* FROM dest1 LIMIT 1
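
The Lineage tags for dest1.c3..c5 flip from EXPRESSION to SIMPLE here (decimal_1.t further down gets the same change), presumably because the folded constants are now emitted as direct projections rather than computed expressions.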

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/create_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out
index e23a993..809f701 100644
--- a/ql/src/test/results/clientpositive/create_view.q.out
+++ b/ql/src/test/results/clientpositive/create_view.q.out
@@ -559,7 +559,7 @@ POSTHOOK: Input: default@table1
 POSTHOOK: Input: default@view4
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@view5
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT * FROM view5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@table1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cross_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cross_join.q.out b/ql/src/test/results/clientpositive/cross_join.q.out
index f01993d..79ef5b3 100644
--- a/ql/src/test/results/clientpositive/cross_join.q.out
+++ b/ql/src/test/results/clientpositive/cross_join.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- current
 explain select src.key from src join src src2
 PREHOOK: type: QUERY
@@ -55,7 +55,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- ansi cross join
 explain select src.key from src cross join src src2
 PREHOOK: type: QUERY
@@ -179,7 +179,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: explain select src.key from src join src src2
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select src.key from src join src src2
@@ -242,7 +242,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: explain select src.key from src cross join src src2
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select src.key from src cross join src src2

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cross_join_merge.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cross_join_merge.q.out b/ql/src/test/results/clientpositive/cross_join_merge.q.out
index f15dd17..f0b80a7 100644
--- a/ql/src/test/results/clientpositive/cross_join_merge.q.out
+++ b/ql/src/test/results/clientpositive/cross_join_merge.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
 select src1.key from src src1 join src src2 join src src3
 PREHOOK: type: QUERY
@@ -233,7 +233,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain
 select src1.key from src src1 join src src2 on 5 = src2.key join src src3 on src1.key=src3.key
 PREHOOK: type: QUERY
@@ -337,7 +337,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
 select src1.key from src src1 left outer join src src2 join src src3
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cross_product_check_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/cross_product_check_1.q.out
index 4feb798..907319d 100644
--- a/ql/src/test/results/clientpositive/cross_product_check_1.q.out
+++ b/ql/src/test/results/clientpositive/cross_product_check_1.q.out
@@ -32,7 +32,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
 POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join B
@@ -90,7 +90,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
@@ -194,7 +194,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join 
          (select d1.key 
           from B d1 join B d2 on d1.key = d2.key
@@ -330,8 +330,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1  group by d1.key) od1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1  group by d1.key) od1
@@ -450,7 +450,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select * from 
 (select A.key from A  group by key) ss join
 (select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cross_product_check_2.q.out
index f34f2b5..bb36f84 100644
--- a/ql/src/test/results/clientpositive/cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/cross_product_check_2.q.out
@@ -32,7 +32,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
 POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[9][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join B
@@ -97,7 +97,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[20][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
 PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A
@@ -191,7 +191,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[28][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[27][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join 
          (select d1.key 
           from B d1 join B d2 on d1.key = d2.key 
@@ -327,8 +327,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[25][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[26][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[23][bigTable=?] in task 'Stage-5:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1
@@ -450,9 +450,9 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[34][bigTable=?] in task 'Stage-7:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-6:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain select * from 
 (select A.key from A group by key) ss join 
 (select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1
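
The JOIN[n] / MAPJOIN[n] renumbering running through create_view, cross_join, cross_join_merge, and both cross_product_check files above is purely an operator-ID shift: the plans contain one or two fewer intermediate operators, so every later operator ID drops accordingly; the cross-product warnings themselves are unchanged.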

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cte_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cte_5.q.out b/ql/src/test/results/clientpositive/cte_5.q.out
index 96a6543..e9d700d 100644
--- a/ql/src/test/results/clientpositive/cte_5.q.out
+++ b/ql/src/test/results/clientpositive/cte_5.q.out
@@ -118,7 +118,7 @@ STAGE PLANS:
                Inner Join 0 to 1
           keys:
             0 UDFToDouble(_col0) (type: double)
-            1 UDFToDouble('5') (type: double)
+            1 5.0 (type: double)
           outputColumnNames: _col0
           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
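
In cte_5 the cast UDFToDouble('5') on one side of the join folds to the literal 5.0. In cte_mat_1 and cte_mat_2 below, both join keys fold to the same constant '5', so the keys degenerate entirely (empty key expressions and sort order) and each plan correctly gains a Shuffle Join cross-product warning.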

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cte_mat_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cte_mat_1.q.out b/ql/src/test/results/clientpositive/cte_mat_1.q.out
index b7d5f7d..bb007c1 100644
--- a/ql/src/test/results/clientpositive/cte_mat_1.q.out
+++ b/ql/src/test/results/clientpositive/cte_mat_1.q.out
@@ -1,3 +1,4 @@
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
 with q1 as (select * from src where key= '5')
 select a.key
@@ -27,9 +28,7 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: '5' (type: string)
-                  sort order: +
-                  Map-reduce partition columns: '5' (type: string)
+                  sort order: 
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
@@ -40,17 +39,15 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: '5' (type: string)
-                  sort order: +
-                  Map-reduce partition columns: '5' (type: string)
+                  sort order: 
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 '5' (type: string)
-            1 '5' (type: string)
+            0 
+            1 
           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: '5' (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cte_mat_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cte_mat_2.q.out b/ql/src/test/results/clientpositive/cte_mat_2.q.out
index b7d5f7d..bb007c1 100644
--- a/ql/src/test/results/clientpositive/cte_mat_2.q.out
+++ b/ql/src/test/results/clientpositive/cte_mat_2.q.out
@@ -1,3 +1,4 @@
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
 with q1 as (select * from src where key= '5')
 select a.key
@@ -27,9 +28,7 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: '5' (type: string)
-                  sort order: +
-                  Map-reduce partition columns: '5' (type: string)
+                  sort order: 
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
@@ -40,17 +39,15 @@ STAGE PLANS:
               Select Operator
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: '5' (type: string)
-                  sort order: +
-                  Map-reduce partition columns: '5' (type: string)
+                  sort order: 
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Join Operator
           condition map:
                Inner Join 0 to 1
           keys:
-            0 '5' (type: string)
-            1 '5' (type: string)
+            0 
+            1 
           Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: '5' (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/decimal_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_stats.q.out b/ql/src/test/results/clientpositive/decimal_stats.q.out
index 0ea9ed6..72806f0 100644
--- a/ql/src/test/results/clientpositive/decimal_stats.q.out
+++ b/ql/src/test/results/clientpositive/decimal_stats.q.out
@@ -29,7 +29,7 @@ POSTHOOK: query: insert overwrite table decimal_1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@decimal_1
-POSTHOOK: Lineage: decimal_1.t EXPRESSION []
+POSTHOOK: Lineage: decimal_1.t SIMPLE []
 POSTHOOK: Lineage: decimal_1.u EXPRESSION []
 POSTHOOK: Lineage: decimal_1.v EXPRESSION []
 PREHOOK: query: analyze table decimal_1 compute statistics for columns

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
index 9a09c4c..86346b3 100644
--- a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
@@ -1046,14 +1046,14 @@ STAGE PLANS:
           outputColumnNames: _col2, _col4, _col5, _col6
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col6 (type: string), _col5 (type: int), _col4 (type: int), _col2 (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col4
+            expressions: _col4 (type: int), _col5 (type: int), _col6 (type: string), _col2 (type: int)
+            outputColumnNames: _col4, _col5, _col6, _col2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Group By Operator
-              aggregations: avg(_col4), stddev_samp(_col4)
-              keys: _col0 (type: string), _col1 (type: int), _col2 (type: int), 3 (type: int)
+              aggregations: stddev_samp(_col2), avg(_col2)
+              keys: _col4 (type: int), _col5 (type: int), _col6 (type: string)
               mode: hash
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               File Output Operator
                 compressed: true
@@ -1067,28 +1067,28 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), 3 (type: int)
-              sort order: ++++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: int), 3 (type: int)
+              key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
+              sort order: +++
+              Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: string)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: _col4 (type: struct<count:bigint,sum:double,input:int>), _col5 (type: struct<count:bigint,sum:double,variance:double>)
+              value expressions: _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,input:int>)
       Reduce Operator Tree:
         Group By Operator
-          aggregations: avg(VALUE._col0), stddev_samp(VALUE._col1)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: int), 3 (type: int)
+          aggregations: stddev_samp(VALUE._col0), avg(VALUE._col1)
+          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), _col5 (type: double)
-            outputColumnNames: _col1, _col2, _col4, _col5
+            expressions: _col1 (type: int), _col0 (type: int), _col3 (type: double), _col4 (type: double)
+            outputColumnNames: _col1, _col2, _col3, _col4
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col5 / _col4) > 1.0)) END (type: boolean)
+              predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col5 / _col4)) END (type: double)
-                outputColumnNames: _col1, _col2, _col4, _col5
+                expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col3 / _col4)) END (type: double)
+                outputColumnNames: _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 File Output Operator
                   compressed: true
@@ -1106,14 +1106,14 @@ STAGE PLANS:
               sort order: ++
               Map-reduce partition columns: _col2 (type: int), _col1 (type: int)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: _col4 (type: double), _col5 (type: double)
+              value expressions: _col3 (type: double), _col4 (type: double)
           TableScan
             Reduce Output Operator
               key expressions: _col2 (type: int), _col1 (type: int)
               sort order: ++
               Map-reduce partition columns: _col2 (type: int), _col1 (type: int)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: _col4 (type: double), _col5 (type: double)
+              value expressions: _col3 (type: double), _col4 (type: double)
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -1121,10 +1121,10 @@ STAGE PLANS:
           keys:
             0 _col2 (type: int), _col1 (type: int)
             1 _col2 (type: int), _col1 (type: int)
-          outputColumnNames: _col1, _col2, _col4, _col5, _col7, _col8, _col10, _col11
+          outputColumnNames: _col1, _col2, _col3, _col4, _col6, _col7, _col8, _col9
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), _col5 (type: double), _col7 (type: int), _col8 (type: int), _col10 (type: double), _col11 (type: double)
+            expressions: _col1 (type: int), _col2 (type: int), _col3 (type: double), _col4 (type: double), _col6 (type: int), _col7 (type: int), _col8 (type: double), _col9 (type: double)
             outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col6, _col8, _col9
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
@@ -1145,7 +1145,7 @@ STAGE PLANS:
               value expressions: _col5 (type: int), _col6 (type: int)
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), 3 (type: int), KEY.reducesinkkey3 (type: double), KEY.reducesinkkey4 (type: double), VALUE._col0 (type: int), VALUE._col1 (type: int), 4 (type: int), KEY.reducesinkkey6 (type: double), KEY.reducesinkkey7 (type: double)
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: double), KEY.reducesinkkey4 (type: double), VALUE._col0 (type: int), VALUE._col1 (type: int), KEY.reducesinkkey5 (type: int), KEY.reducesinkkey6 (type: double), KEY.reducesinkkey7 (type: double)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
@@ -1283,14 +1283,14 @@ STAGE PLANS:
           outputColumnNames: _col2, _col4, _col5, _col6
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col6 (type: string), _col5 (type: int), _col4 (type: int), _col2 (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col4
+            expressions: _col4 (type: int), _col5 (type: int), _col6 (type: string), _col2 (type: int)
+            outputColumnNames: _col4, _col5, _col6, _col2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Group By Operator
-              aggregations: avg(_col4), stddev_samp(_col4)
-              keys: _col0 (type: string), _col1 (type: int), _col2 (type: int), 4 (type: int)
+              aggregations: stddev_samp(_col2), avg(_col2)
+              keys: _col4 (type: int), _col5 (type: int), _col6 (type: string)
               mode: hash
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               File Output Operator
                 compressed: true
@@ -1304,28 +1304,28 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), 4 (type: int)
-              sort order: ++++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: int), 4 (type: int)
+              key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
+              sort order: +++
+              Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: string)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              value expressions: _col4 (type: struct<count:bigint,sum:double,input:int>), _col5 (type: struct<count:bigint,sum:double,variance:double>)
+              value expressions: _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,input:int>)
       Reduce Operator Tree:
         Group By Operator
-          aggregations: avg(VALUE._col0), stddev_samp(VALUE._col1)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: int), 4 (type: int)
+          aggregations: stddev_samp(VALUE._col0), avg(VALUE._col1)
+          keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), _col5 (type: double)
-            outputColumnNames: _col1, _col2, _col4, _col5
+            expressions: _col1 (type: int), _col0 (type: int), _col3 (type: double), _col4 (type: double)
+            outputColumnNames: _col1, _col2, _col3, _col4
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col5 / _col4) > 1.0)) END (type: boolean)
+              predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col5 / _col4)) END (type: double)
-                outputColumnNames: _col1, _col2, _col4, _col5
+                expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col3 / _col4)) END (type: double)
+                outputColumnNames: _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 File Output Operator
                   compressed: true

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
index 391acff..e0d022f 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
@@ -2267,28 +2267,28 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over1k
-            Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 859 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (s = 'foo') (type: boolean)
-              Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: si (type: smallint), b (type: bigint), f (type: float), t (type: tinyint), i (type: int)
-                outputColumnNames: _col0, _col1, _col2, _col4, _col5
-                Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+                expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), t (type: tinyint), i (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 'foo' (type: string), _col4 (type: tinyint), _col5 (type: int)
+                  key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   sort order: +++
-                  Map-reduce partition columns: 'foo' (type: string), _col4 (type: tinyint), _col5 (type: int)
-                  Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+                  Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
+                  Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), 'foo' (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
+          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2332,18 +2332,18 @@ STAGE PLANS:
               predicate: (t = 27) (type: boolean)
               Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), i (type: int)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col5
+                expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), 27 (type: tinyint), i (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col3 (type: string), 27 (type: tinyint), _col5 (type: int)
+                  key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   sort order: +++
-                  Map-reduce partition columns: _col3 (type: string), 27 (type: tinyint), _col5 (type: int)
+                  Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), 27 (type: tinyint), KEY._col5 (type: int)
+          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -2392,18 +2392,18 @@ STAGE PLANS:
               predicate: (i = 100) (type: boolean)
               Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), t (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), t (type: tinyint), 100 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col3 (type: string), _col4 (type: tinyint), 100 (type: int)
+                  key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   sort order: +++
-                  Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), 100 (type: int)
+                  Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), 100 (type: int)
+          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -2452,18 +2452,18 @@ STAGE PLANS:
               predicate: ((i = 100) and (t = 27)) (type: boolean)
               Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
+                expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), 27 (type: tinyint), 100 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col3 (type: string), 27 (type: tinyint), 100 (type: int)
+                  key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   sort order: +++
-                  Map-reduce partition columns: _col3 (type: string), 27 (type: tinyint), 100 (type: int)
+                  Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), 27 (type: tinyint), 100 (type: int)
+          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -2507,28 +2507,28 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over1k
-            Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 859 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((i = 100) and (s = 'foo')) (type: boolean)
-              Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: si (type: smallint), b (type: bigint), f (type: float), t (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2, _col4
-                Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), t (type: tinyint), 100 (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 'foo' (type: string), _col4 (type: tinyint), 100 (type: int)
+                  key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   sort order: +++
-                  Map-reduce partition columns: 'foo' (type: string), _col4 (type: tinyint), 100 (type: int)
-                  Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                  Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
+                  Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), 'foo' (type: string), KEY._col4 (type: tinyint), 100 (type: int)
+          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2567,28 +2567,28 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over1k
-            Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 859 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((t = 27) and (s = 'foo')) (type: boolean)
-              Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: si (type: smallint), b (type: bigint), f (type: float), i (type: int)
-                outputColumnNames: _col0, _col1, _col2, _col5
-                Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27 (type: tinyint), i (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 'foo' (type: string), 27 (type: tinyint), _col5 (type: int)
+                  key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                   sort order: +++
-                  Map-reduce partition columns: 'foo' (type: string), 27 (type: tinyint), _col5 (type: int)
-                  Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                  Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
+                  Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
       Reduce Operator Tree:
         Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), 'foo' (type: string), 27 (type: tinyint), KEY._col5 (type: int)
+          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2632,17 +2632,17 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: over1k
-            Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 859 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((i = 100) and (t = 27) and (s = 'foo')) (type: boolean)
-              Statistics: Num rows: 493 Data size: 13312 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 107 Data size: 13282 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27 (type: tinyint), 100 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 493 Data size: 13312 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 107 Data size: 13282 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 493 Data size: 13312 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 107 Data size: 13282 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
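
Throughout the dynpart_sort_optimization hunks above, the pattern is uniform: constants pinned by the WHERE clause ('foo', 27, 100) are now projected as ordinary output columns (_col3.._col5) and read back as KEY._colN in the reducer, instead of being re-inlined at every downstream operator.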


[19/66] [abbrv] hive git commit: HIVE-13736: View's input/output formats are TEXT by default. (Yongzhi Chen, reviewed by Chaoyu Tang)

Posted by sp...@apache.org.
HIVE-13736: View's input/output formats are TEXT by default. (Yongzhi Chen, reviewed by Chaoyu Tang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c229f995
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c229f995
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c229f995

Branch: refs/heads/java8
Commit: c229f99580dba0eee83e253c5b7c4c596fc0d215
Parents: eff6e05
Author: Yongzhi Chen <yc...@apache.org>
Authored: Mon May 16 15:40:22 2016 -0400
Committer: Yongzhi Chen <yc...@apache.org>
Committed: Tue May 24 11:10:06 2016 -0400

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  13 ++
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   5 +-
 .../hadoop/hive/ql/plan/CreateViewDesc.java     |  24 +++-
 .../clientpositive/create_view_defaultformats.q |  14 ++
 .../alter_view_as_select_with_partition.q.out   |   4 +-
 .../clientpositive/alter_view_as_select.q.out   |  12 +-
 .../clientpositive/create_or_replace_view.q.out |  20 +--
 .../results/clientpositive/create_view.q.out    |  56 ++++----
 .../create_view_defaultformats.q.out            | 128 +++++++++++++++++++
 .../create_view_partitioned.q.out               |  20 +--
 .../clientpositive/create_view_translate.q.out  |   8 +-
 .../llap/selectDistinctStar.q.out               |   8 +-
 .../clientpositive/selectDistinctStar.q.out     |   8 +-
 .../clientpositive/tez/selectDistinctStar.q.out |   8 +-
 14 files changed, 254 insertions(+), 74 deletions(-)
----------------------------------------------------------------------
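
In short, across the three Java diffs below: view creation now resolves a concrete default storage format and records it on the view's metadata, so views stop reporting the SequenceFile classes seen in the old .q.out output. A minimal sketch of the flow, assembled from the diffs (identifiers are taken from the patch, the surrounding analyzer and task state is elided, and the reading of fillDefaultStorageFormat's boolean argument is an assumption):

  // SemanticAnalyzer: resolve the session default (honors hive.default.fileformat)
  // and hand the resulting class names to the view descriptor.
  StorageFormat defaultFmt = new StorageFormat(conf);
  defaultFmt.fillDefaultStorageFormat(false);   // false = not an external table (assumption)
  createVwDesc = new CreateViewDesc(
      dbDotTable, cols, comment,
      defaultFmt.getInputFormat(),    // e.g. org.apache.hadoop.mapred.TextInputFormat
      defaultFmt.getOutputFormat(),   // e.g. org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
      tblProps, partColNames, ifNotExists, orReplace, isAlterViewAs);

  // DDLTask: copy the formats onto the Table object on both the CREATE VIEW
  // path and the ALTER VIEW ... AS (replace) path before persisting it.
  if (crtView.getInputFormat() != null) {
    tbl.setInputFormatClass(crtView.getInputFormat());
  }
  if (crtView.getOutputFormat() != null) {
    tbl.setOutputFormatClass(crtView.getOutputFormat());
  }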


http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 1ebe963..717589a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4159,6 +4159,12 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         oldview.getTTable().getParameters().putAll(crtView.getTblProps());
       }
       oldview.setPartCols(crtView.getPartCols());
+      if (crtView.getInputFormat() != null) {
+        oldview.setInputFormatClass(crtView.getInputFormat());
+      }
+      if (crtView.getOutputFormat() != null) {
+        oldview.setOutputFormatClass(crtView.getOutputFormat());
+      }
       oldview.checkValidity(null);
       try {
         db.alterTable(crtView.getViewName(), oldview, null);
@@ -4186,6 +4192,13 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         tbl.setPartCols(crtView.getPartCols());
       }
 
+      if (crtView.getInputFormat() != null) {
+        tbl.setInputFormatClass(crtView.getInputFormat());
+      }
+      if (crtView.getOutputFormat() != null) {
+        tbl.setOutputFormatClass(crtView.getOutputFormat());
+      }
+
       db.createTable(tbl, crtView.getIfNotExists());
       work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 8c93018..265dd7e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -11762,8 +11762,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       orReplace = true;
     }
 
+    StorageFormat defaultFmt = new StorageFormat(conf);
+    defaultFmt.fillDefaultStorageFormat(false);
     createVwDesc = new CreateViewDesc(
-      dbDotTable, cols, comment, tblProps, partColNames,
+      dbDotTable, cols, comment, defaultFmt.getInputFormat(),
+      defaultFmt.getOutputFormat(), tblProps, partColNames,
       ifNotExists, orReplace, isAlterViewAs);
 
     unparseTranslator.enable();

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
index a5cf076..81c4f77 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
@@ -36,6 +36,8 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
   private static final long serialVersionUID = 1L;
 
   private String viewName;
+  private String inputFormat;
+  private String outputFormat;
   private String originalText;
   private String expandedText;
   private List<FieldSchema> schema;
@@ -54,12 +56,15 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
   }
 
   public CreateViewDesc(String viewName, List<FieldSchema> schema,
-      String comment, Map<String, String> tblProps,
+      String comment, String inputFormat,
+      String outputFormat, Map<String, String> tblProps,
       List<String> partColNames, boolean ifNotExists,
       boolean orReplace, boolean isAlterViewAs) {
     this.viewName = viewName;
     this.schema = schema;
     this.comment = comment;
+    this.inputFormat = inputFormat;
+    this.outputFormat = outputFormat;
     this.tblProps = tblProps;
     this.partColNames = partColNames;
     this.ifNotExists = ifNotExists;
@@ -172,4 +177,21 @@ public class CreateViewDesc extends DDLDesc implements Serializable {
   public void setIsAlterViewAs(boolean isAlterViewAs) {
     this.isAlterViewAs = isAlterViewAs;
   }
+
+  public String getInputFormat() {
+    return inputFormat;
+  }
+
+  public void setInputFormat(String inputFormat) {
+    this.inputFormat = inputFormat;
+  }
+
+  public String getOutputFormat() {
+    return outputFormat;
+  }
+
+  public void setOutputFormat(String outputFormat) {
+    this.outputFormat = outputFormat;
+  }
+
 }
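
CreateViewDesc carries the two formats as plain String class names with ordinary getters/setters, which fits its Serializable contract; turning the names back into classes is presumably left to the Table setters invoked from DDLTask.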

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/queries/clientpositive/create_view_defaultformats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/create_view_defaultformats.q b/ql/src/test/queries/clientpositive/create_view_defaultformats.q
new file mode 100644
index 0000000..66fa141
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/create_view_defaultformats.q
@@ -0,0 +1,14 @@
+drop view if exists sfsrc;
+drop view if exists rcsrc;
+set hive.default.fileformat=SequenceFile;
+create view sfsrc as select * from src;
+set hive.default.fileformat=RcFile;
+create view rcsrc as select * from src;
+describe formatted sfsrc;
+describe formatted rcsrc;
+select * from sfsrc where key = 100 limit 1;
+select * from rcsrc where key = 100 limit 1;
+drop view sfsrc;
+drop view rcsrc;
+set hive.default.fileformat=TextFile;
+
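
Given the two settings exercised above, the expectation is that describe formatted reports SequenceFile and RCFile format classes for sfsrc and rcsrc respectively, while the .q.out updates that follow pin views created under the stock configuration to the TextFile pair (TextInputFormat / HiveIgnoreKeyTextOutputFormat).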

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out b/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
index 1cbfd75..9b84227 100644
--- a/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
+++ b/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
@@ -55,8 +55,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/alter_view_as_select.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_view_as_select.q.out b/ql/src/test/results/clientpositive/alter_view_as_select.q.out
index 3666221..2d82395 100644
--- a/ql/src/test/results/clientpositive/alter_view_as_select.q.out
+++ b/ql/src/test/results/clientpositive/alter_view_as_select.q.out
@@ -37,8 +37,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -77,8 +77,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -126,8 +126,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/create_or_replace_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_or_replace_view.q.out b/ql/src/test/results/clientpositive/create_or_replace_view.q.out
index dd5bf13..f6f26d2 100644
--- a/ql/src/test/results/clientpositive/create_or_replace_view.q.out
+++ b/ql/src/test/results/clientpositive/create_or_replace_view.q.out
@@ -37,8 +37,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -127,8 +127,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -220,8 +220,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -290,8 +290,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -381,8 +381,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/create_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out
index 809f701..d9c1e11 100644
--- a/ql/src/test/results/clientpositive/create_view.q.out
+++ b/ql/src/test/results/clientpositive/create_view.q.out
@@ -257,8 +257,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -306,8 +306,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -353,8 +353,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -402,8 +402,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -764,8 +764,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -844,8 +844,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -922,8 +922,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -990,8 +990,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -1066,8 +1066,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -1138,8 +1138,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -1223,8 +1223,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -1344,8 +1344,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -1460,8 +1460,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -1545,8 +1545,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/create_view_defaultformats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view_defaultformats.q.out b/ql/src/test/results/clientpositive/create_view_defaultformats.q.out
new file mode 100644
index 0000000..dbc4a20
--- /dev/null
+++ b/ql/src/test/results/clientpositive/create_view_defaultformats.q.out
@@ -0,0 +1,128 @@
+PREHOOK: query: drop view if exists sfsrc
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: drop view if exists sfsrc
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: drop view if exists rcsrc
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: drop view if exists rcsrc
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: create view sfsrc as select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@sfsrc
+POSTHOOK: query: create view sfsrc as select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@sfsrc
+PREHOOK: query: create view rcsrc as select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@rcsrc
+POSTHOOK: query: create view rcsrc as select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@rcsrc
+PREHOOK: query: describe formatted sfsrc
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@sfsrc
+POSTHOOK: query: describe formatted sfsrc
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@sfsrc
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+Table Type:         	VIRTUAL_VIEW        	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	null                	 
+InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+	 	 
+# View Information	 	 
+View Original Text: 	select * from src   	 
+View Expanded Text: 	select `src`.`key`, `src`.`value` from `default`.`src`	 
+PREHOOK: query: describe formatted rcsrc
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@rcsrc
+POSTHOOK: query: describe formatted rcsrc
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@rcsrc
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+Table Type:         	VIRTUAL_VIEW        	 
+Table Parameters:	 	 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	null                	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+	 	 
+# View Information	 	 
+View Original Text: 	select * from src   	 
+View Expanded Text: 	select `src`.`key`, `src`.`value` from `default`.`src`	 
+PREHOOK: query: select * from sfsrc where key = 100 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@sfsrc
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from sfsrc where key = 100 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@sfsrc
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+100	val_100
+PREHOOK: query: select * from rcsrc where key = 100 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@rcsrc
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select * from rcsrc where key = 100 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@rcsrc
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+100	val_100
+PREHOOK: query: drop view sfsrc
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@sfsrc
+PREHOOK: Output: default@sfsrc
+POSTHOOK: query: drop view sfsrc
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@sfsrc
+POSTHOOK: Output: default@sfsrc
+PREHOOK: query: drop view rcsrc
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@rcsrc
+PREHOOK: Output: default@rcsrc
+POSTHOOK: query: drop view rcsrc
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@rcsrc
+POSTHOOK: Output: default@rcsrc

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/create_view_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view_partitioned.q.out b/ql/src/test/results/clientpositive/create_view_partitioned.q.out
index caa2251..15d777a 100644
--- a/ql/src/test/results/clientpositive/create_view_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/create_view_partitioned.q.out
@@ -78,8 +78,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -175,8 +175,8 @@ POSTHOOK: type: SHOW_TABLESTATUS
 tableName:vp1
 #### A masked pattern was here ####
 location:null
-inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
-outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+inputformat:org.apache.hadoop.mapred.TextInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 columns:struct columns { string key}
 partitioned:true
 partitionColumns:struct partition_columns { string value}
@@ -188,8 +188,8 @@ POSTHOOK: type: SHOW_TABLESTATUS
 tableName:vp1
 #### A masked pattern was here ####
 location:null
-inputformat:org.apache.hadoop.mapred.SequenceFileInputFormat
-outputformat:org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+inputformat:org.apache.hadoop.mapred.TextInputFormat
+outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 columns:struct columns { string key}
 partitioned:true
 partitionColumns:struct partition_columns { string value}
@@ -285,8 +285,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -398,8 +398,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/create_view_translate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_view_translate.q.out b/ql/src/test/results/clientpositive/create_view_translate.q.out
index 886a01b..2789f8f 100644
--- a/ql/src/test/results/clientpositive/create_view_translate.q.out
+++ b/ql/src/test/results/clientpositive/create_view_translate.q.out
@@ -36,8 +36,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -81,8 +81,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out b/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out
index 5594a0e..fbc8567 100644
--- a/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out
+++ b/ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out
@@ -1397,8 +1397,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -3850,8 +3850,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/selectDistinctStar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/selectDistinctStar.q.out b/ql/src/test/results/clientpositive/selectDistinctStar.q.out
index a95e945..d54fa68 100644
--- a/ql/src/test/results/clientpositive/selectDistinctStar.q.out
+++ b/ql/src/test/results/clientpositive/selectDistinctStar.q.out
@@ -1370,8 +1370,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -3796,8 +3796,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 

http://git-wip-us.apache.org/repos/asf/hive/blob/c229f995/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out b/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
index 8c96260..050bd79 100644
--- a/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
+++ b/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
@@ -1393,8 +1393,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 
@@ -3842,8 +3842,8 @@ Table Parameters:
 	 	 
 # Storage Information	 	 
 SerDe Library:      	null                	 
-InputFormat:        	org.apache.hadoop.mapred.SequenceFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
 Compressed:         	No                  	 
 Num Buckets:        	-1                  	 
 Bucket Columns:     	[]                  	 


[51/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
new file mode 100644
index 0000000..09cb847
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out
@@ -0,0 +1,504 @@
+PREHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) 
+stored as DIRECTORIES
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+partitioned by (ds String, hr String) 
+skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) 
+stored as DIRECTORIES
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_dynamic_part
+PREHOOK: query: -- list bucketing DML
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), hr (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_dynamic_part
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.list_bucketing_dynamic_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.list_bucketing_dynamic_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- check DML result
+desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	3                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 12]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	3                   
+	numRows             	500                 
+	rawDataSize         	5312                
+	totalSize           	5812                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: select count(1) from srcpart where ds='2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds='2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select key, value from srcpart where ds='2008-04-08' and key = "103" and value ="val_103"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value from srcpart where ds='2008-04-08' and key = "103" and value ="val_103"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+103	val_103
+103	val_103
+103	val_103
+103	val_103
+PREHOOK: query: explain extended
+select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_dynamic_part
+              numFiles 3
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.list_bucketing_dynamic_part
+            name: default.list_bucketing_dynamic_part
+          Partition
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_dynamic_part
+              numFiles 3
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.list_bucketing_dynamic_part
+            name: default.list_bucketing_dynamic_part
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_dynamic_part
+          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((key = '103') and (value = 'val_103')) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: '103' (type: string), 'val_103' (type: string), '2008-04-08' (type: string), hr (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+103	val_103	2008-04-08	11
+103	val_103	2008-04-08	11
+103	val_103	2008-04-08	12
+103	val_103	2008-04-08	12
+PREHOOK: query: -- clean up resources
+drop table list_bucketing_dynamic_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- clean up resources
+drop table list_bucketing_dynamic_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Output: default@list_bucketing_dynamic_part
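
For the "Skewed Value to Truncated Path" entries above: list bucketing with
stored-as-directories writes rows matching a skewed value under col=value
subdirectories of the partition path. An illustrative helper (not Hive's
implementation) that reproduces the paths shown:

    public class SkewedPathSketch {
      static String skewedSubdir(String partitionPath, String[] cols, String[] vals) {
        StringBuilder sb = new StringBuilder(partitionPath);
        for (int i = 0; i < cols.length; i++) {
          sb.append('/').append(cols[i]).append('=').append(vals[i]);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        // -> /list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484/value=val_484
        System.out.println(skewedSubdir("/list_bucketing_dynamic_part/ds=2008-04-08/hr=11",
            new String[] {"key", "value"}, new String[] {"484", "val_484"}));
      }
    }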

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
deleted file mode 100644
index c022618..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
+++ /dev/null
@@ -1,1007 +0,0 @@
-PREHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed columns. merge.
--- The following explains the merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge, each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 file
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 000000_0
--- 155 000001_0
--- with merge
--- 254 000000_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 000000_0
--- 99 000001_0
--- with merge
--- 142 000001_0
--- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 000000_0
--- 5181 000001_0
--- with merge
--- 5181 000000_0
--- 5181 000001_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 000000_0
--- 87 000001_0
--- with merge
--- 118 000002_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed columns. merge.
--- The following explains the merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge, each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 file
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 000000_0
--- 155 000001_0
--- with merge
--- 254 000000_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 000000_0
--- 99 000001_0
--- with merge
--- 142 000001_0
--- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 000000_0
--- 5181 000001_0
--- with merge
--- 5181 000000_0
--- 5181 000001_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 000000_0
--- 87 000001_0
--- with merge
--- 118 000002_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_dynamic_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_dynamic_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-ds=2008-04-08/hr=a1
-ds=2008-04-08/hr=b1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, a1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	2                   
-	numRows             	16                  
-	rawDataSize         	136                 
-	totalSize           	310                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, b1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	6                   
-	numRows             	984                 
-	rawDataSize         	9488                
-	totalSize           	10734               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- list bucketing DML with merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML with merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_dynamic_part
-                      partition_columns hr
-                      partition_columns.types string
-                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_dynamic_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns hr
-                partition_columns.types string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              partition_columns hr
-              partition_columns.types string
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns hr
-                partition_columns.types string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              partition_columns hr
-              partition_columns.types string
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns hr
-                partition_columns.types string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-ds=2008-04-08/hr=a1
-ds=2008-04-08/hr=b1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, a1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	1                   
-	numRows             	16                  
-	rawDataSize         	136                 
-	totalSize           	254                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, b1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	984                 
-	rawDataSize         	9488                
-	totalSize           	10622               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_dynamic_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_dynamic_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr a1
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 1
-              numRows 16
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 136
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 254
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr b1
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 4
-              numRows 984
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9488
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10622
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_dynamic_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-484	val_484	2008-04-08	b1
-484	val_484	2008-04-08	b1
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- clean up
-drop table list_bucketing_dynamic_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_dynamic_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part


[54/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
deleted file mode 100644
index dcfbec0..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
+++ /dev/null
@@ -1,591 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	6                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10898               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 6
-              numRows 1000
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9624
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10898
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_static_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
--- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
--- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	12
-51	val_51	2008-04-08	12
-PREHOOK: query: select * from list_bucketing_static_part where key = '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where key = '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
-select count(1) from srcpart where ds = '2008-04-08' and key < '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
-select count(1) from srcpart where ds = '2008-04-08' and key < '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-910
-PREHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-910
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-914
-PREHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-914
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-86
-PREHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-86
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-90
-PREHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-90
-PREHOOK: query: -- clean up
-drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
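
The hunks above and below all record the same list_bucket_dml_2 q-file scenario: list-bucketing DML into a static partition of a table skewed on multiple columns, with each listed (key, value) pair stored in its own subdirectory. As a reader's aid, here is a minimal HiveQL sketch of that scenario, assembled from the statements the golden outputs themselves capture; the session below is illustrative and is not part of the recorded diff:

    -- Skewed table: each listed (key, value) pair gets its own directory.
    create table list_bucketing_static_part (key string, value string)
        partitioned by (ds string, hr string)
        skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
        stored as DIRECTORIES
        STORED AS RCFILE;

    -- Static-partition DML; rows matching a skewed pair land under
    -- ds=2008-04-08/hr=11/key=<k>/value=<v>, everything else under the
    -- HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME subdirectory.
    insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
    select key, value from srcpart where ds = '2008-04-08';

    -- Equality predicates on a skewed pair prune the scan to one
    -- subdirectory, as the "Truncated Path -> Alias" entries in the
    -- recorded plans show.
    select * from list_bucketing_static_part
    where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';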

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out
deleted file mode 100644
index aeeba03..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.8.out
+++ /dev/null
@@ -1,692 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	6                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10898               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_static_part
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            and
-               and
-                  =
-                     TOK_TABLE_OR_COL
-                        ds
-                     '2008-04-08'
-                  =
-                     TOK_TABLE_OR_COL
-                        hr
-                     '11'
-               =
-                  TOK_TABLE_OR_COL
-                     key
-                  '484'
-            =
-               TOK_TABLE_OR_COL
-                  value
-               'val_484'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_static_part
-            Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3
-                        columns.types string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.nesting.levels true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_484
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 6
-              numRows 1000
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9624
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10898
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Truncated Path -> Alias:
-        /list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484 [list_bucketing_static_part]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
--- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
--- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	12
-51	val_51	2008-04-08	12
-PREHOOK: query: select * from list_bucketing_static_part where key = '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where key = '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
-select count(1) from srcpart where ds = '2008-04-08' and key < '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
-select count(1) from srcpart where ds = '2008-04-08' and key < '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-910
-PREHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-910
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-914
-PREHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-914
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-86
-PREHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-86
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-90
-PREHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-90
-PREHOOK: query: -- clean up
-drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
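
The deleted file above carried the JAVA_VERSION_SPECIFIC_OUTPUT marker; the unified list_bucket_dml_2.q.out added below drops that marker and the ABSTRACT SYNTAX TREE dump, and records partition statistics in the newer JSON form. A short sketch of where the difference surfaces, using the desc statement from the test itself (the inline comments are ours, not recorded output):

    -- In the java1.8 golden file the partition parameter reads:
    --   COLUMN_STATS_ACCURATE   true
    -- In the unified golden file it reads:
    --   COLUMN_STATS_ACCURATE   {"BASIC_STATS":"true"}
    desc formatted list_bucketing_static_part partition (ds = '2008-04-08', hr = '11');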

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
new file mode 100644
index 0000000..a29c224
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out
@@ -0,0 +1,589 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 000000_0
+-- 99 000001_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 000000_0
+-- 87 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 000000_0
+-- 99 000001_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 000000_0
+-- 87 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=11/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_static_part
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_static_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_static_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_static_part
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_static_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	6                   
+	numRows             	1000                
+	rawDataSize         	9624                
+	totalSize           	10898               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select count(*) from list_bucketing_static_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from list_bucketing_static_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+1000
+PREHOOK: query: explain extended
+select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              numFiles 6
+              numRows 1000
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 9624
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 10898
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_static_part
+          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+484	val_484	2008-04-08	11
+484	val_484	2008-04-08	11
+PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+484	val_484	2008-04-08	11
+484	val_484	2008-04-08	12
+PREHOOK: query: -- 51 and val_51 are in the table, so skewed data for 51 and val_14 should be none
+-- but the query should succeed for 51, or for 51 and val_14
+select * from srcpart where ds = '2008-04-08' and key = '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: -- 51 and val_51 are in the table, so skewed data for 51 and val_14 should be none
+-- but the query should succeed for 51, or for 51 and val_14
+select * from srcpart where ds = '2008-04-08' and key = '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+51	val_51	2008-04-08	11
+51	val_51	2008-04-08	11
+51	val_51	2008-04-08	12
+51	val_51	2008-04-08	12
+PREHOOK: query: select * from list_bucketing_static_part where key = '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_static_part where key = '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+51	val_51	2008-04-08	11
+51	val_51	2008-04-08	11
+51	val_51	2008-04-08	11
+51	val_51	2008-04-08	11
+PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+PREHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+PREHOOK: query: -- queries with <, <=, > and >= should work on the skewed table although we don't benefit from pruning
+select count(1) from srcpart where ds = '2008-04-08' and key < '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: -- queries with <, <=, > and >= should work on the skewed table although we don't benefit from pruning
+select count(1) from srcpart where ds = '2008-04-08' and key < '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+910
+PREHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+910
+PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+914
+PREHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+914
+PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+86
+PREHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+86
+PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+90
+PREHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+90
+PREHOOK: query: -- clean up
+drop table list_bucketing_static_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- clean up
+drop table list_bucketing_static_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Output: default@list_bucketing_static_part
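
The run above exercises Hive's list bucketing. Each skewed (key, value)
pair that actually occurs in the data gets its own subdirectory under the
partition (note "Stored As SubDirectories: Yes" and the "Skewed Value to
Truncated Path" map in the DESC output), so a point lookup such as
key = '484' and value = 'val_484' resolves to a single Fetch stage with no
MapReduce job, as the EXPLAIN EXTENDED plan shows. A minimal sketch of the
kind of DDL that produces this layout (the actual CREATE TABLE is not part
of this hunk, so the exact statement is assumed):

    create table list_bucketing_static_part (key string, value string)
        partitioned by (ds string, hr string)
        -- one subdirectory per listed (key, value) pair; all remaining
        -- rows land in a shared default directory
        skewed by (key, value) on (('484','val_484'), ('51','val_14'), ('103','val_103'))
        stored as DIRECTORIES
        stored as RCFILE;

Because ('51','val_14') never occurs in srcpart, no directory is created
for it and it is absent from the skewed-path map; queries on key = '51'
still succeed, reading the ('51','val_51') rows from the default
directory, which is exactly what the comments in the queries above assert.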


[26/66] [abbrv] hive git commit: Preparing for 2.2.0 development

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
new file mode 100644
index 0000000..39ba7cb
--- /dev/null
+++ b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql
@@ -0,0 +1,809 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+    PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(1000) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+    CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+    CD_ID NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    "COLUMN_NAME" VARCHAR2(1000) NOT NULL,
+    TYPE_NAME VARCHAR2(4000) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+    PART_ID NUMBER NOT NULL,
+    PART_KEY_VAL VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+    DB_ID NUMBER NOT NULL,
+    "DESC" VARCHAR2(4000) NULL,
+    DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    OWNER_TYPE VARCHAR2(10) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+    PART_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+    SERDE_ID NUMBER NOT NULL,
+    "NAME" VARCHAR2(128) NULL,
+    SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+    TYPES_ID NUMBER NOT NULL,
+    TYPE_NAME VARCHAR2(128) NULL,
+    TYPE1 VARCHAR2(767) NULL,
+    TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+    TBL_ID NUMBER NOT NULL,
+    PKEY_COMMENT VARCHAR2(4000) NULL,
+    PKEY_NAME VARCHAR2(128) NOT NULL,
+    PKEY_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+    ROLE_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    OWNER_NAME VARCHAR2(128) NULL,
+    ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+CREATE TABLE PARTITIONS
+(
+    PART_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    PART_NAME VARCHAR2(767) NULL,
+    SD_ID NUMBER NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+
+-- Table INDEX_PARAMS for join relationship
+CREATE TABLE INDEX_PARAMS
+(
+    INDEX_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+
+-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+CREATE TABLE TBL_COL_PRIVS
+(
+    TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(1000) NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_COL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+
+-- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+CREATE TABLE IDXS
+(
+    INDEX_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+    INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+    INDEX_NAME VARCHAR2(128) NULL,
+    INDEX_TBL_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    ORIG_TBL_ID NUMBER NULL,
+    SD_ID NUMBER NULL
+);
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+
+-- Table BUCKETING_COLS for join relationship
+CREATE TABLE BUCKETING_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    BUCKET_COL_NAME VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TYPE_FIELDS for join relationship
+CREATE TABLE TYPE_FIELDS
+(
+    TYPE_NAME NUMBER NOT NULL,
+    "COMMENT" VARCHAR2(256) NULL,
+    FIELD_NAME VARCHAR2(128) NOT NULL,
+    FIELD_TYPE VARCHAR2(767) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+
+-- Table SD_PARAMS for join relationship
+CREATE TABLE SD_PARAMS
+(
+    SD_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+
+-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE TABLE GLOBAL_PRIVS
+(
+    USER_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    USER_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+
+-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+CREATE TABLE SDS
+(
+    SD_ID NUMBER NOT NULL,
+    CD_ID NUMBER NULL,
+    INPUT_FORMAT VARCHAR2(4000) NULL,
+    IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+    LOCATION VARCHAR2(4000) NULL,
+    NUM_BUCKETS NUMBER (10) NOT NULL,
+    OUTPUT_FORMAT VARCHAR2(4000) NULL,
+    SERDE_ID NUMBER NULL,
+    IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+);
+
+ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+
+-- Table TABLE_PARAMS for join relationship
+CREATE TABLE TABLE_PARAMS
+(
+    TBL_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+
+-- Table SORT_COLS for join relationship
+CREATE TABLE SORT_COLS
+(
+    SD_ID NUMBER NOT NULL,
+    "COLUMN_NAME" VARCHAR2(1000) NULL,
+    "ORDER" NUMBER (10) NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+CREATE TABLE TBL_PRIVS
+(
+    TBL_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    TBL_PRIV VARCHAR2(128) NULL,
+    TBL_ID NUMBER NULL
+);
+
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+
+-- Table DATABASE_PARAMS for join relationship
+CREATE TABLE DATABASE_PARAMS
+(
+    DB_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(180) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+
+-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+CREATE TABLE ROLE_MAP
+(
+    ROLE_GRANT_ID NUMBER NOT NULL,
+    ADD_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    ROLE_ID NUMBER NULL
+);
+
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+
+-- Table SERDE_PARAMS for join relationship
+CREATE TABLE SERDE_PARAMS
+(
+    SERDE_ID NUMBER NOT NULL,
+    PARAM_KEY VARCHAR2(256) NOT NULL,
+    PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+
+-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+CREATE TABLE PART_PRIVS
+(
+    PART_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PART_ID NUMBER NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    PART_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+
+-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+CREATE TABLE DB_PRIVS
+(
+    DB_GRANT_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    GRANT_OPTION NUMBER (5) NOT NULL,
+    GRANTOR VARCHAR2(128) NULL,
+    GRANTOR_TYPE VARCHAR2(128) NULL,
+    PRINCIPAL_NAME VARCHAR2(128) NULL,
+    PRINCIPAL_TYPE VARCHAR2(128) NULL,
+    DB_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+
+-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+CREATE TABLE TBLS
+(
+    TBL_ID NUMBER NOT NULL,
+    CREATE_TIME NUMBER (10) NOT NULL,
+    DB_ID NUMBER NULL,
+    LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+    OWNER VARCHAR2(767) NULL,
+    RETENTION NUMBER (10) NOT NULL,
+    SD_ID NUMBER NULL,
+    TBL_NAME VARCHAR2(128) NULL,
+    TBL_TYPE VARCHAR2(128) NULL,
+    VIEW_EXPANDED_TEXT CLOB NULL,
+    VIEW_ORIGINAL_TEXT CLOB NULL
+);
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+
+-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE TABLE PARTITION_EVENTS
+(
+    PART_NAME_ID NUMBER NOT NULL,
+    DB_NAME VARCHAR2(128) NULL,
+    EVENT_TIME NUMBER NOT NULL,
+    EVENT_TYPE NUMBER (10) NOT NULL,
+    PARTITION_NAME VARCHAR2(767) NULL,
+    TBL_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+
+-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+CREATE TABLE SKEWED_STRING_LIST
+(
+    STRING_LIST_ID NUMBER NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+
+CREATE TABLE SKEWED_STRING_LIST_VALUES
+(
+    STRING_LIST_ID NUMBER NOT NULL,
+    "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_NAMES
+(
+    SD_ID NUMBER NOT NULL,
+    "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+(
+    SD_ID NUMBER NOT NULL,
+    STRING_LIST_ID_KID NUMBER NOT NULL,
+    "LOCATION" VARCHAR2(4000) NULL
+);
+
+CREATE TABLE MASTER_KEYS
+(
+    KEY_ID NUMBER (10) NOT NULL,
+    MASTER_KEY VARCHAR2(767) NULL
+);
+
+CREATE TABLE DELEGATION_TOKENS
+(
+    TOKEN_IDENT VARCHAR2(767) NOT NULL,
+    TOKEN VARCHAR2(767) NULL
+);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE TABLE SKEWED_VALUES
+(
+    SD_ID_OID NUMBER NOT NULL,
+    STRING_LIST_ID_EID NUMBER NOT NULL,
+    INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+-- column statistics
+
+CREATE TABLE TAB_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL, 
+ COLUMN_NAME VARCHAR2(1000) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ TBL_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+CREATE TABLE VERSION (
+  VER_ID NUMBER NOT NULL,
+  SCHEMA_VERSION VARCHAR(127) NOT NULL,
+  VERSION_COMMENT VARCHAR(255)
+);
+ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+
+CREATE TABLE PART_COL_STATS (
+ CS_ID NUMBER NOT NULL,
+ DB_NAME VARCHAR2(128) NOT NULL,
+ TABLE_NAME VARCHAR2(128) NOT NULL,
+ PARTITION_NAME VARCHAR2(767) NOT NULL,
+ COLUMN_NAME VARCHAR2(1000) NOT NULL,
+ COLUMN_TYPE VARCHAR2(128) NOT NULL,
+ PART_ID NUMBER NOT NULL,
+ LONG_LOW_VALUE NUMBER,
+ LONG_HIGH_VALUE NUMBER,
+ DOUBLE_LOW_VALUE NUMBER,
+ DOUBLE_HIGH_VALUE NUMBER,
+ BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+ BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+ NUM_NULLS NUMBER NOT NULL,
+ NUM_DISTINCTS NUMBER,
+ AVG_COL_LEN NUMBER,
+ MAX_COL_LEN NUMBER,
+ NUM_TRUES NUMBER,
+ NUM_FALSES NUMBER,
+ LAST_ANALYZED NUMBER NOT NULL
+);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+
+ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+
+CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+
+CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+
+CREATE TABLE FUNCS (
+  FUNC_ID NUMBER NOT NULL,
+  CLASS_NAME VARCHAR2(4000),
+  CREATE_TIME NUMBER(10) NOT NULL,
+  DB_ID NUMBER,
+  FUNC_NAME VARCHAR2(128),
+  FUNC_TYPE NUMBER(10) NOT NULL,
+  OWNER_NAME VARCHAR2(128),
+  OWNER_TYPE VARCHAR2(10)
+);
+
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+
+CREATE TABLE FUNC_RU (
+  FUNC_ID NUMBER NOT NULL,
+  RESOURCE_TYPE NUMBER(10) NOT NULL,
+  RESOURCE_URI VARCHAR2(4000),
+  INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+
+CREATE TABLE NOTIFICATION_LOG
+(
+    NL_ID NUMBER NOT NULL,
+    EVENT_ID NUMBER NOT NULL,
+    EVENT_TIME NUMBER(10) NOT NULL,
+    EVENT_TYPE VARCHAR2(32) NOT NULL,
+    DB_NAME VARCHAR2(128),
+    TBL_NAME VARCHAR2(128),
+    MESSAGE CLOB NULL
+);
+
+ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+
+CREATE TABLE NOTIFICATION_SEQUENCE
+(
+    NNI_ID NUMBER NOT NULL,
+    NEXT_EVENT_ID NUMBER NOT NULL
+);
+
+ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+
+
+
+-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+
+CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table COLUMNS_V2
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+
+
+-- Constraints for table PARTITION_KEY_VALS
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+
+
+-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME");
+
+
+-- Constraints for table PARTITION_PARAMS
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+
+
+-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+
+-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+
+
+-- Constraints for table PARTITION_KEYS
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+
+
+-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+
+
+-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+
+CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+
+CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+
+
+-- Constraints for table INDEX_PARAMS
+ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+
+
+-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+
+
+-- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+
+CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+
+CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+
+CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+
+
+-- Constraints for table BUCKETING_COLS
+ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+
+
+-- Constraints for table TYPE_FIELDS
+ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+
+
+-- Constraints for table SD_PARAMS
+ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+
+
+-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+CREATE INDEX SDS_N50 ON SDS (CD_ID);
+
+
+-- Constraints for table TABLE_PARAMS
+ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+
+
+-- Constraints for table SORT_COLS
+ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+
+
+-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+
+CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table DATABASE_PARAMS
+ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+
+
+-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+
+CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+
+
+-- Constraints for table SERDE_PARAMS
+ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+
+
+-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+
+
+-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+
+CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+
+
+-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+
+ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+
+CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+
+CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+
+CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+
+
+-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+
+
+-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+
+CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+
+CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+
+
+-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+
+CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+
+CREATE TABLE KEY_CONSTRAINTS
+(
+  CHILD_CD_ID NUMBER,
+  CHILD_INTEGER_IDX NUMBER,
+  CHILD_TBL_ID NUMBER,
+  PARENT_CD_ID NUMBER NOT NULL,
+  PARENT_INTEGER_IDX NUMBER NOT NULL,
+  PARENT_TBL_ID NUMBER NOT NULL,
+  POSITION NUMBER NOT NULL,
+  CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+  CONSTRAINT_TYPE NUMBER NOT NULL,
+  UPDATE_RULE NUMBER,
+  DELETE_RULE NUMBER,
+  ENABLE_VALIDATE_RELY NUMBER NOT NULL
+) ;
+
+ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+
+CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+
+
+------------------------------
+-- Transaction and lock tables
+------------------------------
+@hive-txn-schema-2.2.0.oracle.sql;
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '2.2.0', 'Hive release version 2.2.0');
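
The init script follows the usual metastore convention: the DataNucleus
bookkeeping tables (SEQUENCE_TABLE, NUCLEUS_TABLES) and all object tables
and constraints come first, the transaction tables are pulled in through a
SQL*Plus @ include, and the VERSION row is written as the very last step,
so an interrupted run never presents itself as a complete 2.2.0 schema. A
quick sanity check after a run (a sketch; deployments normally let Hive's
schematool drive these scripts rather than invoking them by hand):

    -- from SQL*Plus, connected as the metastore schema owner
    @hive-schema-2.2.0.oracle.sql;
    SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;
    -- expected: 2.2.0 | Hive release version 2.2.0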

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql
new file mode 100644
index 0000000..d39baab
--- /dev/null
+++ b/metastore/scripts/upgrade/oracle/hive-txn-schema-2.2.0.oracle.sql
@@ -0,0 +1,129 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- Tables for transaction management
+-- 
+
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar2(128),
+  TXN_META_INFO varchar2(128),
+  TXN_HEARTBEAT_COUNT number(10)
+) ROWDEPENDENCIES;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL,
+  TC_OPERATION_TYPE char(1) NOT NULL
+) ROWDEPENDENCIES;
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ROWDEPENDENCIES;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT number(10),
+  HL_AGENT_INFO varchar2(128),
+  HL_BLOCKEDBY_EXT_ID number(19),
+  HL_BLOCKEDBY_INT_ID number(19),
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+) ROWDEPENDENCIES;
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID NUMBER(19),
+  CQ_META_INFO BLOB,
+  CQ_HADOOP_JOB_ID varchar2(32)
+) ROWDEPENDENCIES;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID NUMBER(19) PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_WORKER_ID varchar(128),
+  CC_START NUMBER(19),
+  CC_END NUMBER(19),
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID NUMBER(19),
+  CC_META_INFO BLOB,
+  CC_HADOOP_JOB_ID varchar2(32)
+) ROWDEPENDENCIES;
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar2(128) NOT NULL,
+  MT_KEY2 number(19) NOT NULL,
+  MT_COMMENT varchar2(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar2(128) NOT NULL,
+  WS_TABLE varchar2(128) NOT NULL,
+  WS_PARTITION varchar2(767),
+  WS_TXNID number(19) NOT NULL,
+  WS_COMMIT_ID number(19) NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
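
Two details of this transaction schema are worth noting. First,
NEXT_TXN_ID, NEXT_LOCK_ID and NEXT_COMPACTION_QUEUE_ID are single-row
counter tables seeded with 1: portable sequences that behave the same
across the supported metastore databases. Second, the ROWDEPENDENCIES
clause asks Oracle to track change SCNs per row instead of per block,
which reduces false conflicts on these hot, frequently updated tables. A
sketch of how such a counter hands out an id (the metastore's TxnHandler
performs the equivalent over JDBC; this is illustration only):

    SELECT NTXN_NEXT FROM NEXT_TXN_ID FOR UPDATE;      -- lock the counter row, read the next id
    UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 1;  -- advance it for the next caller
    COMMIT;                                            -- the value read above is now this txn's id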

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
new file mode 100644
index 0000000..66784a4
--- /dev/null
+++ b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql
@@ -0,0 +1,4 @@
+SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual;
+
+UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
+SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual;
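
This step is deliberately a near no-op: the two SELECT ... FROM dual
statements only echo progress, and since this commit merely opens 2.2.0
development, the script's single real action is the VERSION bump. Steps
like this are chained in sequence by the upgrade.order file in the next
hunk; applying one by hand would look like the following sketch, though
schematool is the usual driver:

    -- from SQL*Plus, connected as the metastore schema owner
    @upgrade-2.1.0-to-2.2.0.oracle.sql;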

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/oracle/upgrade.order.oracle
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade.order.oracle b/metastore/scripts/upgrade/oracle/upgrade.order.oracle
index d07f95d..28a453f 100644
--- a/metastore/scripts/upgrade/oracle/upgrade.order.oracle
+++ b/metastore/scripts/upgrade/oracle/upgrade.order.oracle
@@ -7,3 +7,4 @@
 1.1.0-to-1.2.0
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
+2.1.0-to-2.2.0

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
new file mode 100644
index 0000000..63ac3be
--- /dev/null
+++ b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql
@@ -0,0 +1,1476 @@
+--
+-- PostgreSQL database dump
+--
+
+SET statement_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = off;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET escape_string_warning = off;
+
+SET search_path = public, pg_catalog;
+
+SET default_tablespace = '';
+
+SET default_with_oids = false;
+
+--
+-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "BUCKETING_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "CDS" (
+    "CD_ID" bigint NOT NULL
+);
+
+
+--
+-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "COLUMNS_V2" (
+    "CD_ID" bigint NOT NULL,
+    "COMMENT" character varying(4000),
+    "COLUMN_NAME" character varying(1000) NOT NULL,
+    "TYPE_NAME" character varying(4000),
+    "INTEGER_IDX" integer NOT NULL
+);
+
+
+--
+-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DATABASE_PARAMS" (
+    "DB_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(180) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DBS" (
+    "DB_ID" bigint NOT NULL,
+    "DESC" character varying(4000) DEFAULT NULL::character varying,
+    "DB_LOCATION_URI" character varying(4000) NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "DB_PRIVS" (
+    "DB_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "DB_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "GLOBAL_PRIVS" (
+    "USER_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "USER_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "IDXS" (
+    "INDEX_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DEFERRED_REBUILD" boolean NOT NULL,
+    "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+    "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+    "INDEX_TBL_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "ORIG_TBL_ID" bigint,
+    "SD_ID" bigint
+);
+
+
+--
+-- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "INDEX_PARAMS" (
+    "INDEX_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "NUCLEUS_TABLES" (
+    "CLASS_NAME" character varying(128) NOT NULL,
+    "TABLE_NAME" character varying(128) NOT NULL,
+    "TYPE" character varying(4) NOT NULL,
+    "OWNER" character varying(2) NOT NULL,
+    "VERSION" character varying(20) NOT NULL,
+    "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITIONS" (
+    "PART_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+    "SD_ID" bigint,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_EVENTS" (
+    "PART_NAME_ID" bigint NOT NULL,
+    "DB_NAME" character varying(128),
+    "EVENT_TIME" bigint NOT NULL,
+    "EVENT_TYPE" integer NOT NULL,
+    "PARTITION_NAME" character varying(767),
+    "TBL_NAME" character varying(128)
+);
+
+
+--
+-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEYS" (
+    "TBL_ID" bigint NOT NULL,
+    "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+    "PKEY_NAME" character varying(128) NOT NULL,
+    "PKEY_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_KEY_VALS" (
+    "PART_ID" bigint NOT NULL,
+    "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PARTITION_PARAMS" (
+    "PART_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_PRIVS" (
+    "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_PRIVS" (
+    "PART_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_ID" bigint,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PART_PRIV" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLES" (
+    "ROLE_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "ROLE_MAP" (
+    "ROLE_GRANT_ID" bigint NOT NULL,
+    "ADD_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "ROLE_ID" bigint
+);
+
+
+--
+-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SDS" (
+    "SD_ID" bigint NOT NULL,
+    "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "IS_COMPRESSED" boolean NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+    "NUM_BUCKETS" bigint NOT NULL,
+    "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+    "SERDE_ID" bigint,
+    "CD_ID" bigint,
+    "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+);
+
+
+--
+-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SD_PARAMS" (
+    "SD_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SEQUENCE_TABLE" (
+    "SEQUENCE_NAME" character varying(255) NOT NULL,
+    "NEXT_VAL" bigint NOT NULL
+);
+
+
+--
+-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDES" (
+    "SERDE_ID" bigint NOT NULL,
+    "NAME" character varying(128) DEFAULT NULL::character varying,
+    "SLIB" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SERDE_PARAMS" (
+    "SERDE_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "SORT_COLS" (
+    "SD_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
+    "ORDER" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TABLE_PARAMS" (
+    "TBL_ID" bigint NOT NULL,
+    "PARAM_KEY" character varying(256) NOT NULL,
+    "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBLS" (
+    "TBL_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "DB_ID" bigint,
+    "LAST_ACCESS_TIME" bigint NOT NULL,
+    "OWNER" character varying(767) DEFAULT NULL::character varying,
+    "RETENTION" bigint NOT NULL,
+    "SD_ID" bigint,
+    "TBL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "VIEW_EXPANDED_TEXT" text,
+    "VIEW_ORIGINAL_TEXT" text
+);
+
+
+--
+-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_COL_PRIVS" (
+    "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+    "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TBL_PRIVS" (
+    "TBL_GRANT_ID" bigint NOT NULL,
+    "CREATE_TIME" bigint NOT NULL,
+    "GRANT_OPTION" smallint NOT NULL,
+    "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+    "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+    "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+    "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+    "TBL_ID" bigint
+);
+
+
+--
+-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPES" (
+    "TYPES_ID" bigint NOT NULL,
+    "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+    "TYPE1" character varying(767) DEFAULT NULL::character varying,
+    "TYPE2" character varying(767) DEFAULT NULL::character varying
+);
+
+
+--
+-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "TYPE_FIELDS" (
+    "TYPE_NAME" bigint NOT NULL,
+    "COMMENT" character varying(256) DEFAULT NULL::character varying,
+    "FIELD_NAME" character varying(128) NOT NULL,
+    "FIELD_TYPE" character varying(767) NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST" (
+    "STRING_LIST_ID" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+    "STRING_LIST_ID" bigint NOT NULL,
+    "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_NAMES" (
+    "SD_ID" bigint NOT NULL,
+    "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+    "SD_ID" bigint NOT NULL,
+    "STRING_LIST_ID_KID" bigint NOT NULL,
+    "LOCATION" character varying(4000) DEFAULT NULL::character varying
+);
+
+CREATE TABLE "SKEWED_VALUES" (
+    "SD_ID_OID" bigint NOT NULL,
+    "STRING_LIST_ID_EID" bigint NOT NULL,
+    "INTEGER_IDX" bigint NOT NULL
+);
+
+
+--
+-- Name: MASTER_KEYS, DELEGATION_TOKENS, TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE  "MASTER_KEYS"
+(
+    "KEY_ID" SERIAL,
+    "MASTER_KEY" varchar(767) NULL,
+    PRIMARY KEY ("KEY_ID")
+);
+
+CREATE TABLE  "DELEGATION_TOKENS"
+(
+    "TOKEN_IDENT" varchar(767) NOT NULL,
+    "TOKEN" varchar(767) NULL,
+    PRIMARY KEY ("TOKEN_IDENT")
+);
+
+CREATE TABLE "TAB_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "TBL_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for VERSION
+--
+CREATE TABLE "VERSION" (
+  "VER_ID" bigint,
+  "SCHEMA_VERSION" character varying(127) NOT NULL,
+  "VERSION_COMMENT" character varying(255) NOT NULL
+);
+
+--
+-- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE TABLE "PART_COL_STATS" (
+ "CS_ID" bigint NOT NULL,
+ "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+ "TABLE_NAME" character varying(128) DEFAULT NULL::character varying,
+ "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+ "COLUMN_NAME" character varying(1000) DEFAULT NULL::character varying,
+ "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+ "PART_ID" bigint NOT NULL,
+ "LONG_LOW_VALUE" bigint,
+ "LONG_HIGH_VALUE" bigint,
+ "DOUBLE_LOW_VALUE" double precision,
+ "DOUBLE_HIGH_VALUE" double precision,
+ "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+ "NUM_NULLS" bigint NOT NULL,
+ "NUM_DISTINCTS" bigint,
+ "AVG_COL_LEN" double precision,
+ "MAX_COL_LEN" bigint,
+ "NUM_TRUES" bigint,
+ "NUM_FALSES" bigint,
+ "LAST_ANALYZED" bigint NOT NULL
+);
+
+--
+-- Table structure for FUNCS
+--
+CREATE TABLE "FUNCS" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "CLASS_NAME" VARCHAR(4000),
+  "CREATE_TIME" INTEGER NOT NULL,
+  "DB_ID" BIGINT,
+  "FUNC_NAME" VARCHAR(128),
+  "FUNC_TYPE" INTEGER NOT NULL,
+  "OWNER_NAME" VARCHAR(128),
+  "OWNER_TYPE" VARCHAR(10),
+  PRIMARY KEY ("FUNC_ID")
+);
+
+--
+-- Table structure for FUNC_RU
+--
+CREATE TABLE "FUNC_RU" (
+  "FUNC_ID" BIGINT NOT NULL,
+  "RESOURCE_TYPE" INTEGER NOT NULL,
+  "RESOURCE_URI" VARCHAR(4000),
+  "INTEGER_IDX" INTEGER NOT NULL,
+  PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+);
+
+CREATE TABLE "NOTIFICATION_LOG"
+(
+    "NL_ID" BIGINT NOT NULL,
+    "EVENT_ID" BIGINT NOT NULL,
+    "EVENT_TIME" INTEGER NOT NULL,
+    "EVENT_TYPE" VARCHAR(32) NOT NULL,
+    "DB_NAME" VARCHAR(128),
+    "TBL_NAME" VARCHAR(128),
+    "MESSAGE" text,
+    PRIMARY KEY ("NL_ID")
+);
+
+CREATE TABLE "NOTIFICATION_SEQUENCE"
+(
+    "NNI_ID" BIGINT NOT NULL,
+    "NEXT_EVENT_ID" BIGINT NOT NULL,
+    PRIMARY KEY ("NNI_ID")
+);
+
+CREATE TABLE "KEY_CONSTRAINTS"
+(
+  "CHILD_CD_ID" BIGINT,
+  "CHILD_INTEGER_IDX" BIGINT,
+  "CHILD_TBL_ID" BIGINT,
+  "PARENT_CD_ID" BIGINT NOT NULL,
+  "PARENT_INTEGER_IDX" BIGINT NOT NULL,
+  "PARENT_TBL_ID" BIGINT NOT NULL,
+  "POSITION" BIGINT NOT NULL,
+  "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
+  "CONSTRAINT_TYPE" SMALLINT NOT NULL,
+  "UPDATE_RULE" SMALLINT,
+  "DELETE_RULE"	SMALLINT,
+  "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
+  PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
+) ;
+
+CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
+
+--
+-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "CDS"
+    ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+
+
+--
+-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+
+
+--
+-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+
+
+--
+-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+
+
+--
+-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "GLOBAL_PRIVS"
+    ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+
+
+--
+-- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+
+
+--
+-- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+
+
+--
+-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "NUCLEUS_TABLES"
+    ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+
+
+--
+-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+
+
+--
+-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_EVENTS"
+    ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+
+
+--
+-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+
+
+--
+-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+
+
+--
+-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+
+
+--
+-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+
+
+--
+-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+
+
+--
+-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+
+
+--
+-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLES"
+    ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+
+
+--
+-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+
+
+--
+-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+
+
+--
+-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+
+
+--
+-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SEQUENCE_TABLE"
+    ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+
+
+--
+-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDES"
+    ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+
+
+--
+-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+
+
+--
+-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+
+--
+-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+
+
+--
+-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+
+
+--
+-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+
+
+--
+-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+
+
+--
+-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+
+--
+-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+
+--
+-- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+
+
+--
+-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+
+
+--
+-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+
+
+--
+-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "DBS"
+    ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME");
+
+
+--
+-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "TYPES"
+    ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+
+
+--
+-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+
+
+--
+-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+
+
+--
+-- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+
+
+--
+-- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+
+
+--
+-- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+
+
+--
+-- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+
+
+--
+-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+
+
+--
+-- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+
+
+--
+-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+
+
+--
+-- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+
+
+--
+-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+
+
+--
+-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+
+--
+-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+
+
+--
+-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+
+
+--
+-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+
+
+--
+-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+
+
+--
+-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+
+
+--
+-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+
+
+--
+-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+
+
+--
+-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+
+--
+-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+
+--
+-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+
+--
+-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+
+--
+-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+
+--
+-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+
+
+ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+    ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_NAMES"
+    ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+    ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+
+ALTER TABLE ONLY "SKEWED_VALUES"
+    ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "BUCKETING_COLS"
+    ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "COLUMNS_V2"
+    ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DATABASE_PARAMS"
+    ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "DB_PRIVS"
+    ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "IDXS"
+    ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "INDEX_PARAMS"
+    ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITIONS"
+    ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEYS"
+    ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_KEY_VALS"
+    ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PARTITION_PARAMS"
+    ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_COL_PRIVS"
+    ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "PART_PRIVS"
+    ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+--
+-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "ROLE_MAP"
+    ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+
+
+--
+-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SDS"
+    ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SD_PARAMS"
+    ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SERDE_PARAMS"
+    ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+
+
+--
+-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "SORT_COLS"
+    ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TABLE_PARAMS"
+    ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+
+
+--
+-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBLS"
+    ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_COL_PRIVS"
+    ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TBL_PRIVS"
+    ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+
+ALTER TABLE ONLY "TYPE_FIELDS"
+    ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+
+--
+-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+
+
+--
+-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+--
+ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+
+
+ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+
+-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNCS"
+    ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+
+-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ALTER TABLE ONLY "FUNC_RU"
+    ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+
+--
+-- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+--
+
+REVOKE ALL ON SCHEMA public FROM PUBLIC;
+GRANT ALL ON SCHEMA public TO PUBLIC;
+
+--
+-- PostgreSQL database dump complete
+--
+
+------------------------------
+-- Transaction and lock tables
+------------------------------
+\i hive-txn-schema-2.2.0.postgres.sql;
+
+-- -----------------------------------------------------------------
+-- Record schema version. Should be the last step in the init script
+-- -----------------------------------------------------------------
+INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '2.2.0', 'Hive release version 2.2.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql
new file mode 100644
index 0000000..262b93e
--- /dev/null
+++ b/metastore/scripts/upgrade/postgres/hive-txn-schema-2.2.0.postgres.sql
@@ -0,0 +1,129 @@
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- Tables for transaction management
+-- 
+
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL,
+  TXN_AGENT_INFO varchar(128),
+  TXN_META_INFO varchar(128),
+  TXN_HEARTBEAT_COUNT integer
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767) DEFAULT NULL,
+  TC_OPERATION_TYPE char(1) NOT NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767) DEFAULT NULL,
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  HL_HEARTBEAT_COUNT integer,
+  HL_AGENT_INFO varchar(128),
+  HL_BLOCKEDBY_EXT_ID bigint,
+  HL_BLOCKEDBY_INT_ID bigint,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128),
+  CQ_HIGHEST_TXN_ID bigint,
+  CQ_META_INFO bytea,
+  CQ_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE COMPLETED_COMPACTIONS (
+  CC_ID bigint PRIMARY KEY,
+  CC_DATABASE varchar(128) NOT NULL,
+  CC_TABLE varchar(128) NOT NULL,
+  CC_PARTITION varchar(767),
+  CC_STATE char(1) NOT NULL,
+  CC_TYPE char(1) NOT NULL,
+  CC_WORKER_ID varchar(128),
+  CC_START bigint,
+  CC_END bigint,
+  CC_RUN_AS varchar(128),
+  CC_HIGHEST_TXN_ID bigint,
+  CC_META_INFO bytea,
+  CC_HADOOP_JOB_ID varchar(32)
+);
+
+CREATE TABLE AUX_TABLE (
+  MT_KEY1 varchar(128) NOT NULL,
+  MT_KEY2 bigint NOT NULL,
+  MT_COMMENT varchar(255),
+  PRIMARY KEY(MT_KEY1, MT_KEY2)
+);
+
+CREATE TABLE WRITE_SET (
+  WS_DATABASE varchar(128) NOT NULL,
+  WS_TABLE varchar(128) NOT NULL,
+  WS_PARTITION varchar(767),
+  WS_TXNID bigint NOT NULL,
+  WS_COMMIT_ID bigint NOT NULL,
+  WS_OPERATION_TYPE char(1) NOT NULL
+);
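
[Editor's note: NEXT_TXN_ID, NEXT_LOCK_ID, and NEXT_COMPACTION_QUEUE_ID above each hold a single row, seeded with 1, that acts as a counter. A minimal sketch of how such a single-row counter is typically consumed (assumed usage for illustration, not code from this patch) reserves a block of ids inside one transaction:]

    -- reserve 5 transaction ids from the single seed row (illustrative)
    BEGIN;
    SELECT NTXN_NEXT FROM NEXT_TXN_ID;                 -- e.g. returns 1
    UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 5;  -- ids 1..5 are now reserved
    COMMIT;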

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
new file mode 100644
index 0000000..0b4591d
--- /dev/null
+++ b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql
@@ -0,0 +1,5 @@
+SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0';
+
+UPDATE "VERSION" SET "SCHEMA_VERSION"='2.2.0', "VERSION_COMMENT"='Hive release version 2.2.0' where "VER_ID"=1;
+SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0';
+
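
[Editor's note: a minimal way to verify this upgrade step (illustrative only), assuming the VERSION table already held the 2.1.0 row with "VER_ID" = 1:]

    SELECT "SCHEMA_VERSION" FROM "VERSION" WHERE "VER_ID" = 1;
    -- expected: 2.2.0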

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/metastore/scripts/upgrade/postgres/upgrade.order.postgres
----------------------------------------------------------------------
diff --git a/metastore/scripts/upgrade/postgres/upgrade.order.postgres b/metastore/scripts/upgrade/postgres/upgrade.order.postgres
index e0bb692..420174a 100644
--- a/metastore/scripts/upgrade/postgres/upgrade.order.postgres
+++ b/metastore/scripts/upgrade/postgres/upgrade.order.postgres
@@ -11,3 +11,4 @@
 1.1.0-to-1.2.0
 1.2.0-to-2.0.0
 2.0.0-to-2.1.0
+2.1.0-to-2.2.0

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/orc/pom.xml
----------------------------------------------------------------------
diff --git a/orc/pom.xml b/orc/pom.xml
index cc27077..e6ec0f4 100644
--- a/orc/pom.xml
+++ b/orc/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/packaging/pom.xml
----------------------------------------------------------------------
diff --git a/packaging/pom.xml b/packaging/pom.xml
index 7f668b3..6e5a396 100644
--- a/packaging/pom.xml
+++ b/packaging/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 41522de..117aec9 100644
--- a/pom.xml
+++ b/pom.xml
@@ -21,7 +21,7 @@
   </parent>
   <groupId>org.apache.hive</groupId>
   <artifactId>hive</artifactId>
-  <version>2.1.0-SNAPSHOT</version>
+  <version>2.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <name>Hive</name>

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index 8b2d0e6..db03978 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/serde/pom.xml
----------------------------------------------------------------------
diff --git a/serde/pom.xml b/serde/pom.xml
index 9f50764..7673eb7 100644
--- a/serde/pom.xml
+++ b/serde/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/service-rpc/pom.xml
----------------------------------------------------------------------
diff --git a/service-rpc/pom.xml b/service-rpc/pom.xml
index 6df90d5..086fa5f 100644
--- a/service-rpc/pom.xml
+++ b/service-rpc/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/service/pom.xml
----------------------------------------------------------------------
diff --git a/service/pom.xml b/service/pom.xml
index ae82cd3..ecea719 100644
--- a/service/pom.xml
+++ b/service/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/shims/0.23/pom.xml
----------------------------------------------------------------------
diff --git a/shims/0.23/pom.xml b/shims/0.23/pom.xml
index a39f7f0..cb1999e 100644
--- a/shims/0.23/pom.xml
+++ b/shims/0.23/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/shims/aggregator/pom.xml
----------------------------------------------------------------------
diff --git a/shims/aggregator/pom.xml b/shims/aggregator/pom.xml
index 70b1c58..e4899ae 100644
--- a/shims/aggregator/pom.xml
+++ b/shims/aggregator/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/shims/common/pom.xml
----------------------------------------------------------------------
diff --git a/shims/common/pom.xml b/shims/common/pom.xml
index fd90689..700bd22 100644
--- a/shims/common/pom.xml
+++ b/shims/common/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/shims/pom.xml
----------------------------------------------------------------------
diff --git a/shims/pom.xml b/shims/pom.xml
index e3c174a..8741585 100644
--- a/shims/pom.xml
+++ b/shims/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/shims/scheduler/pom.xml
----------------------------------------------------------------------
diff --git a/shims/scheduler/pom.xml b/shims/scheduler/pom.xml
index b36c123..9141c1e 100644
--- a/shims/scheduler/pom.xml
+++ b/shims/scheduler/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 


[57/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join0.q.out b/ql/src/test/results/clientpositive/join0.q.out
new file mode 100644
index 0000000..59122e2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/join0.q.out
@@ -0,0 +1,238 @@
+Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key < 10) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: string), _col1 (type: string)
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key < 10) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: string), _col1 (type: string)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+              sort order: ++++
+              Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: EXPLAIN FORMATTED
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FORMATTED
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}},{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COM
 PLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}}],"Reduce Operator Tree:":{"Join Operator":{"condition map:":[{"":"Inner Join 0 to 1"}],"keys:":{},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","outp
 ut format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"}}}}}}},"Stage-2":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)","sort order:":"++++","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE"}}}}],"Reduce Operator Tree:":{"Select Operator":{"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoo
 p.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}
+Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
+PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	2	val_2
+0	val_0	2	val_2
+0	val_0	2	val_2
+0	val_0	4	val_4
+0	val_0	4	val_4
+0	val_0	4	val_4
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	8	val_8
+0	val_0	8	val_8
+0	val_0	8	val_8
+0	val_0	9	val_9
+0	val_0	9	val_9
+0	val_0	9	val_9
+2	val_2	0	val_0
+2	val_2	0	val_0
+2	val_2	0	val_0
+2	val_2	2	val_2
+2	val_2	4	val_4
+2	val_2	5	val_5
+2	val_2	5	val_5
+2	val_2	5	val_5
+2	val_2	8	val_8
+2	val_2	9	val_9
+4	val_4	0	val_0
+4	val_4	0	val_0
+4	val_4	0	val_0
+4	val_4	2	val_2
+4	val_4	4	val_4
+4	val_4	5	val_5
+4	val_4	5	val_5
+4	val_4	5	val_5
+4	val_4	8	val_8
+4	val_4	9	val_9
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	2	val_2
+5	val_5	2	val_2
+5	val_5	2	val_2
+5	val_5	4	val_4
+5	val_5	4	val_4
+5	val_5	4	val_4
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	8	val_8
+5	val_5	8	val_8
+5	val_5	8	val_8
+5	val_5	9	val_9
+5	val_5	9	val_9
+5	val_5	9	val_9
+8	val_8	0	val_0
+8	val_8	0	val_0
+8	val_8	0	val_0
+8	val_8	2	val_2
+8	val_8	4	val_4
+8	val_8	5	val_5
+8	val_8	5	val_5
+8	val_8	5	val_5
+8	val_8	8	val_8
+8	val_8	9	val_9
+9	val_9	0	val_0
+9	val_9	0	val_0
+9	val_9	0	val_0
+9	val_9	2	val_2
+9	val_9	4	val_4
+9	val_9	5	val_5
+9	val_9	5	val_5
+9	val_9	5	val_5
+9	val_9	8	val_8
+9	val_9	9	val_9
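
The cross-product warning above appears because the two subqueries are joined with no join condition, so Stage-1 runs a single-reducer shuffle join over the full Cartesian product of the two filtered inputs (note the empty "keys:" and empty "sort order:" in the JSON plan). A minimal sketch of the equi-join form that avoids the warning, assuming the same src table as the test; the ON clause is added for illustration and is not part of the test query:

  SELECT src1.key AS k1, src1.value AS v1,
         src2.key AS k2, src2.value AS v2
  FROM (SELECT * FROM src WHERE src.key < 10) src1
    JOIN (SELECT * FROM src WHERE src.key < 10) src2
      ON (src1.key = src2.key)   -- join key added for illustration only
  SORT BY k1, v1, k2, v2;

With a join key present, the Reduce Output Operators get real key expressions and the work is spread across reducers instead of funneling through a cross product.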

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out
deleted file mode 100644
index 8447e86..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.7.out
+++ /dev/null
@@ -1,361 +0,0 @@
-PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5520                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key]               	 
-Skewed Values:      	[[484], [51], [103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484, [103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
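
In the plan above, Stage-7 is a Conditional Operator that picks at run time between Stage-4 (a plain move of the insert output) and the Stage-3/Stage-5 RCFile block-merge stages, depending on how many small files the list-bucketing insert produced. A rough sketch of the session settings that give a plan this shape; the option names are real Hive settings, but the values shown are assumptions rather than what the test harness actually sets:

  set hive.merge.mapfiles=true;               -- merge small files from map-only jobs
  set hive.merge.mapredfiles=true;            -- merge small files from map-reduce jobs
  set hive.merge.smallfiles.avgsize=16000000; -- assumed threshold: merge only if the
                                              -- average output file is smaller than this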

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out
deleted file mode 100644
index d1b9598..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.java1.8.out
+++ /dev/null
@@ -1,389 +0,0 @@
-PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            src
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	0                   
-	rawDataSize         	0                   
-	totalSize           	5520                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key]               	 
-Skewed Values:      	[[484], [51], [103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_10.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_10.q.out
new file mode 100644
index 0000000..d4681b7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_10.q.out
@@ -0,0 +1,359 @@
+PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
+
+-- list bucketing DML: static partition. multiple skewed columns.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 000000_0
+-- 99 000001_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 000000_0
+-- 87 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key) on ('484','51','103')
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
+
+-- list bucketing DML: static partition. multiple skewed columns.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 000000_0
+-- 99 000001_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 000000_0
+-- 87 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key) on ('484','51','103')
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=11/
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_static_part
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_static_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_static_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+      Truncated Path -> Alias:
+        /src [src]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              partition_columns.types string:string
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-5
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              partition_columns.types string:string
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_static_part
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_static_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	4812                
+	totalSize           	5520                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key]               	 
+Skewed Values:      	[[484], [51], [103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
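
The "Skewed Value to Truncated Path" line above shows the effect of STORED AS DIRECTORIES: each skewed key value lands in its own subdirectory of the partition (key=484, key=51, key=103), with everything else in the default directory. A sketch of the kind of point lookup this layout is meant to prune, reusing the table from the test; whether the planner actually prunes down to the key=484 directory depends on list-bucketing optimization being enabled in the session (typically hive.optimize.listbucketing, though that is an assumption about the configuration here):

  select key, value
  from list_bucketing_static_part
  where ds = '2008-04-08' and hr = '11' and key = '484';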

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
deleted file mode 100644
index b58d17c..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
+++ /dev/null
@@ -1,329 +0,0 @@
-PREHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (value) on ('val_466','val_287','val_82')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (value) on ('val_466','val_287','val_82')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5522                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[value]             	 
-Skewed Values:      	[[val_466], [val_287], [val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[val_82]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_82, [val_287]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_287, [val_466]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain extended
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 4
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 4812
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 5522
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_static_part
-          Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: (value = 'val_466') (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), 'val_466' (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-466	val_466
-466	val_466
-466	val_466
-PREHOOK: query: drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
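
The golden file deleted above exercises list bucketing DML with a skew on the
value column: rows whose value matches a skewed literal land in their own
subdirectory of the partition, which is what the "Skewed Value to Truncated
Path" mapping in the desc formatted output records. A minimal HiveQL sketch of
the scenario, pieced together from the plan and the desc formatted output (the
CREATE statement sits outside this hunk, so the skewed-by clause below is
inferred from the Skewed Columns and Skewed Values lines, not copied):

  create table list_bucketing_static_part (key string, value string)
      partitioned by (ds string, hr string)
      skewed by (value) on ('val_466','val_287','val_82')
      stored as DIRECTORIES
      stored as RCFILE;

  insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
  select key, value from src;

  -- point query on a skewed value; it can be answered from the
  -- value=val_466 subdirectory alone
  select key, value from list_bucketing_static_part
  where ds = '2008-04-08' and hr = '11' and value = "val_466";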


[47/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
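
This commit drops the per-JDK golden files for list_bucket_dml_9: the two hunks
below delete the .java1.7.out and .java1.8.out variants. The variants existed
because the test header carries driver directives (verbatim from the deleted
files):

  -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
  -- SORT_QUERY_RESULTS
  -- JAVA_VERSION_SPECIFIC_OUTPUT

The JAVA_VERSION_SPECIFIC_OUTPUT marker made the harness compare against a
golden file suffixed with the running JVM version, presumably because the
printed plans were not stable across JDK 7 and JDK 8 (note, for example, that
the java1.8 variant below carries an ABSTRACT SYNTAX TREE block and a plain
"COLUMN_STATS_ACCURATE true" property where the java1.7 variant has the JSON
encoding). With the marker gone, a single .q.out serves both runtimes.
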
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
deleted file mode 100644
index 752ea4e..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
+++ /dev/null
@@ -1,813 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103:
--- 99 000000_0
--- 99 000001_0
--- after merge
--- 142 000000_0
--- ds=2008-04-08/hr=11/key=484:
--- 87 000000_0
--- 87 000001_0
--- after merge
--- 118 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103:
--- 99 000000_0
--- 99 000001_0
--- after merge
--- 142 000000_0
--- ds=2008-04-08/hr=11/key=484:
--- 87 000000_0
--- 87 000001_0
--- after merge
--- 118 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	6                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10898               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key]               	 
-Skewed Values:      	[[484], [103]]      	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484, [103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10786               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key]               	 
-Skewed Values:      	[[484], [103]]      	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484, [103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 4
-              numRows 1000
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9624
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10786
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_static_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- clean up
-drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
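
The java1.7 variant ends here; the java1.8 variant follows. For reference,
this is the scenario both golden files cover, as plain HiveQL lifted from the
deleted output (the create table and the queries are verbatim; the two set
statements are an assumption, since the merge configuration itself is not part
of the golden output):

  create table list_bucketing_static_part (key String, value String)
      partitioned by (ds String, hr String)
      skewed by (key) on ('484','103')
      stored as DIRECTORIES
      STORED AS RCFILE;

  -- first insert, no merge: leaves several small files per directory
  -- (numFiles 6 in the first desc formatted output)
  insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
  select key, value from srcpart where ds = '2008-04-08';

  -- assumed merge settings for the second insert (not visible above)
  set hive.merge.mapfiles=true;
  set hive.merge.mapredfiles=true;

  -- second insert, with merge: the conditional Stage-3/Stage-5 RCFile
  -- Merge Operator stages compact the partition down to numFiles 4
  insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
  select key, value from srcpart where ds = '2008-04-08';

  -- point query on a skewed key; can be answered from the key=484
  -- subdirectory alone
  select * from list_bucketing_static_part
  where ds = '2008-04-08' and hr = '11' and key = '484' and value = 'val_484';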

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out
deleted file mode 100644
index 599d3b0..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.8.out
+++ /dev/null
@@ -1,915 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103:
--- 99 000000_0
--- 99 000001_0
--- after merge
--- 142 000000_0
--- ds=2008-04-08/hr=11/key=484:
--- 87 000000_0
--- 87 000001_0
--- after merge
--- 118 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns. merge.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103:
--- 99 000000_0
--- 99 000001_0
--- after merge
--- 142 000000_0
--- ds=2008-04-08/hr=11/key=484:
--- 87 000000_0
--- 87 000001_0
--- after merge
--- 118 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	6                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10898               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key]               	 
-Skewed Values:      	[[484], [103]]      	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              partition_columns.types string:string
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10786               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key]               	 
-Skewed Values:      	[[484], [103]]      	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_static_part
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            and
-               and
-                  =
-                     TOK_TABLE_OR_COL
-                        ds
-                     '2008-04-08'
-                  =
-                     TOK_TABLE_OR_COL
-                        hr
-                     '11'
-               =
-                  TOK_TABLE_OR_COL
-                     key
-                  '484'
-            =
-               TOK_TABLE_OR_COL
-                  value
-               'val_484'
-
-
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 4
-              numRows 1000
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9624
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10786
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_static_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- clean up
-drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
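
The plan output above comes from Hive's list bucketing (skewed table) tests: rows whose skewed-column values match the declared skew list are written into dedicated subdirectories of the partition. A minimal sketch of the statements the test runs, reconstructed from the DESC FORMATTED and EXPLAIN output above — the SET options are assumptions about how such a test is typically configured, not taken from this diff:

    -- List bucketing writes per-skew-value subdirectories, so the job needs
    -- recursive/subdirectory input support (assumed settings):
    SET hive.mapred.supports.subdirectories=true;
    SET mapred.input.dir.recursive=true;
    -- For the "DML with merge" variant, let Hive merge the small files:
    SET hive.merge.mapfiles=true;

    -- Skew spec matching the "Skewed Columns: [key]" /
    -- "Skewed Values: [[484], [103]]" lines shown above:
    CREATE TABLE list_bucketing_static_part (key STRING, value STRING)
        PARTITIONED BY (ds STRING, hr STRING)
        SKEWED BY (key) ON ('484', '103')
        STORED AS DIRECTORIES
        STORED AS RCFILE;

    INSERT OVERWRITE TABLE list_bucketing_static_part PARTITION (ds = '2008-04-08', hr = '11')
    SELECT key, value FROM srcpart WHERE ds = '2008-04-08';

After the merge stages (Stage-3/Stage-5 above) run, the partition reports numFiles 4 instead of 6, while numRows stays at 1000.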


[56/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
deleted file mode 100644
index 00a6235..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.8.out
+++ /dev/null
@@ -1,424 +0,0 @@
-PREHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (value) on ('val_466','val_287','val_82')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (value) on ('val_466','val_287','val_82')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            src
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_static_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_static_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_static_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5522                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[value]             	 
-Skewed Values:      	[[val_466], [val_287], [val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[val_287]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_287, [val_82]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_82, [val_466]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain extended
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_static_part
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         and
-            and
-               =
-                  TOK_TABLE_OR_COL
-                     ds
-                  '2008-04-08'
-               =
-                  TOK_TABLE_OR_COL
-                     hr
-                  '11'
-            =
-               TOK_TABLE_OR_COL
-                  value
-               "val_466"
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_static_part
-            Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: (value = 'val_466') (type: boolean)
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), 'val_466' (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1
-                        columns.types string:string
-                        escape.delim \
-                        hive.serialization.extend.nesting.levels true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_466
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 4
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 4812
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 5522
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Truncated Path -> Alias:
-        /list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466 [$hdt$_0:list_bucketing_static_part]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-466	val_466
-466	val_466
-466	val_466
-PREHOOK: query: drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
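
Between the deleted java1.8 golden file above and the regenerated unified file below, the query results are identical; what changes is plan bookkeeping — the new file drops the ABSTRACT SYNTAX TREE section and prints COLUMN_STATS_ACCURATE in its JSON form ({"BASIC_STATS":"true", ...}) instead of a bare true. The behavior both files record is skew-aware pruning; a hedged sketch (the table and predicate come from the diff itself, the SET option is an assumption about the test's configuration):

    -- Let the optimizer use the per-skew-value subdirectories for pruning
    -- (assumed setting):
    SET hive.optimize.listbucketing=true;

    SELECT key, value
    FROM list_bucketing_static_part
    WHERE ds = '2008-04-08' AND hr = '11' AND value = 'val_466';
    -- Per "Truncated Path -> Alias" in the plan above, only
    -- /list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466
    -- is scanned, rather than the whole partition.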

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
new file mode 100644
index 0000000..ecf54a8
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out
@@ -0,0 +1,327 @@
+PREHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+-- list bucketing DML: static partition. multiple skewed columns.
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (value) on ('val_466','val_287','val_82')
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+-- list bucketing DML: static partition. multiple skewed columns.
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (value) on ('val_466','val_287','val_82')
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=11/
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_static_part
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_static_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_static_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+      Truncated Path -> Alias:
+        /src [src]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_static_part
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_static_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	4812                
+	totalSize           	5522                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[value]             	 
+Skewed Values:      	[[val_466], [val_287], [val_82]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[val_287]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_287, [val_82]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_82, [val_466]=/list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain extended
+select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              numFiles 4
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 4812
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 5522
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_static_part
+          Statistics: Num rows: 500 Data size: 4812 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: (value = 'val_466') (type: boolean)
+            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), 'val_466' (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select key, value from list_bucketing_static_part where ds='2008-04-08' and hr='11' and value = "val_466"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+466	val_466
+466	val_466
+466	val_466
+PREHOOK: query: drop table list_bucketing_static_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: drop table list_bucketing_static_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Output: default@list_bucketing_static_part
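
A minimal HiveQL sketch of the single-column list-bucketing pattern this golden
file exercises, pieced together from the DESC FORMATTED metadata above (skewed
column [value], stored as subdirectories, RCFile); the .q file itself may
differ, and the two set options are assumed prerequisites rather than anything
shown in this diff:

    -- assumed prerequisites for list bucketing (not shown in this diff)
    set hive.mapred.supports.subdirectories=true;
    set hive.optimize.listbucketing=true;

    -- each skewed value of `value` gets its own subdirectory under the partition
    create table list_bucketing_static_part (key string, value string)
        partitioned by (ds string, hr string)
        skewed by (value) on ('val_466', 'val_287', 'val_82')
        stored as DIRECTORIES
        STORED AS RCFILE;

    insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
    select key, value from src;

    -- equality on a skewed value lets the planner read only that value's
    -- subdirectory, per the "Skewed Value to Truncated Path" map above
    select key, value from list_bucketing_static_part
    where ds = '2008-04-08' and hr = '11' and value = 'val_466';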

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
deleted file mode 100644
index 0be7f4e..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
+++ /dev/null
@@ -1,426 +0,0 @@
-PREHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
-    partitioned by (ds String, hr String) 
-    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
-    partitioned by (ds String, hr String) 
-    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_mul_col
-PREHOOK: query: -- list bucketing DML 
-explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '11')
-select 1, key, 1, value, 1 from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML 
-explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '11')
-select 1, key, 1, value, 1 from src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: '1' (type: string), key (type: string), '1' (type: string), value (type: string), '1' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns col1,col2,col3,col4,col5
-                      columns.comments 
-                      columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_mul_col
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_mul_col
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
-select 1, key, 1, value, 1 from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
-select 1, key, 1, value, 1 from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col1 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col2 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col3 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col5 EXPRESSION []
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_mul_col
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_mul_col
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_mul_col
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-# col_name            	data_type           	comment             
-	 	 
-col1                	string              	                    
-col2                	string              	                    
-col3                	string              	                    
-col4                	string              	                    
-col5                	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_mul_col	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	6312                
-	totalSize           	7094                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[col2, col4]        	 
-Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=82/col4=val_82, [466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=466/col4=val_466, [287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=287/col4=val_287}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns col1,col2,col3,col4,col5
-              columns.comments 
-              columns.types string:string:string:string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_mul_col
-              numFiles 4
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 6312
-              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 7094
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-            name: default.list_bucketing_mul_col
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_mul_col
-          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((col2 = '466') and (col4 = 'val_466')) (type: boolean)
-            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: col1 (type: string), '466' (type: string), col3 (type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1	466	1	val_466	1	2008-04-08	11
-1	466	1	val_466	1	2008-04-08	11
-1	466	1	val_466	1	2008-04-08	11
-PREHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns col1,col2,col3,col4,col5
-              columns.comments 
-              columns.types string:string:string:string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_mul_col
-              numFiles 4
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 6312
-              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 7094
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-            name: default.list_bucketing_mul_col
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_mul_col
-          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((col2 = '382') and (col4 = 'val_382')) (type: boolean)
-            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: col1 (type: string), '382' (type: string), col3 (type: string), 'val_382' (type: string), col5 (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1	382	1	val_382	1	2008-04-08	11
-1	382	1	val_382	1	2008-04-08	11
-PREHOOK: query: drop table list_bucketing_mul_col
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: drop table list_bucketing_mul_col
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Output: default@list_bucketing_mul_col
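
With multi-column skew, each skewed tuple maps to a nested subdirectory
(col2=466/col4=val_466 and so on, per the "Skewed Value to Truncated Path" map
above). As a short sketch of the pruning contrast the two EXPLAINs in this file
probe, with both queries taken verbatim from the output and the directory
comments reflecting the truncated-path mappings shown above:

    -- both skewed columns pinned to a skewed tuple: only the
    -- .../col2=466/col4=val_466 subdirectory needs to be read
    select * from list_bucketing_mul_col
    where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466";

    -- (382, val_382) is not a skewed tuple: the scan falls back to the
    -- default directory (HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME in the
    -- java1.8 plan below)
    select * from list_bucketing_mul_col
    where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382";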

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out
deleted file mode 100644
index 6d2298b..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.8.out
+++ /dev/null
@@ -1,596 +0,0 @@
-PREHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
-    partitioned by (ds String, hr String) 
-    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
-    partitioned by (ds String, hr String) 
-    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_mul_col
-PREHOOK: query: -- list bucketing DML 
-explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '11')
-select 1, key, 1, value, 1 from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML 
-explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '11')
-select 1, key, 1, value, 1 from src
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            src
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_mul_col
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            1
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            1
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-         TOK_SELEXPR
-            1
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: UDFToString(1) (type: string), key (type: string), UDFToString(1) (type: string), value (type: string), UDFToString(1) (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=11/
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns col1,col2,col3,col4,col5
-                      columns.comments 
-                      columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_mul_col
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_mul_col
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [$hdt$_0:src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
-select 1, key, 1, value, 1 from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
-select 1, key, 1, value, 1 from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col1 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col2 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col3 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col5 EXPRESSION []
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_mul_col
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_mul_col
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_mul_col
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-# col_name            	data_type           	comment             
-	 	 
-col1                	string              	                    
-col2                	string              	                    
-col3                	string              	                    
-col4                	string              	                    
-col5                	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_mul_col	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	6312                
-	totalSize           	7094                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[col2, col4]        	 
-Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=466/col4=val_466, [287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=287/col4=val_287, [82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=82/col4=val_82}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_mul_col
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            and
-               and
-                  =
-                     TOK_TABLE_OR_COL
-                        ds
-                     '2008-04-08'
-                  =
-                     TOK_TABLE_OR_COL
-                        hr
-                     '11'
-               =
-                  TOK_TABLE_OR_COL
-                     col2
-                  "466"
-            =
-               TOK_TABLE_OR_COL
-                  col4
-               "val_466"
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_mul_col
-            Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((col2 = '466') and (col4 = 'val_466')) (type: boolean)
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: col1 (type: string), '466' (type: string), col3 (type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' (type: string), '11' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
-                        columns.types string:string:string:string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.nesting.levels true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: col4=val_466
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns col1,col2,col3,col4,col5
-              columns.comments 
-              columns.types string:string:string:string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_mul_col
-              numFiles 4
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 6312
-              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 7094
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-            name: default.list_bucketing_mul_col
-      Truncated Path -> Alias:
-        /list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=466/col4=val_466 [list_bucketing_mul_col]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1	466	1	val_466	1	2008-04-08	11
-1	466	1	val_466	1	2008-04-08	11
-1	466	1	val_466	1	2008-04-08	11
-PREHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_mul_col
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            and
-               and
-                  =
-                     TOK_TABLE_OR_COL
-                        ds
-                     '2008-04-08'
-                  =
-                     TOK_TABLE_OR_COL
-                        hr
-                     '11'
-               =
-                  TOK_TABLE_OR_COL
-                     col2
-                  "382"
-            =
-               TOK_TABLE_OR_COL
-                  col4
-               "val_382"
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_mul_col
-            Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((col2 = '382') and (col4 = 'val_382')) (type: boolean)
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: col1 (type: string), '382' (type: string), col3 (type: string), 'val_382' (type: string), col5 (type: string), '2008-04-08' (type: string), '11' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
-                        columns.types string:string:string:string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.nesting.levels true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns col1,col2,col3,col4,col5
-              columns.comments 
-              columns.types string:string:string:string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_mul_col
-              numFiles 4
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 6312
-              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 7094
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-            name: default.list_bucketing_mul_col
-      Truncated Path -> Alias:
-        /list_bucketing_mul_col/ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [list_bucketing_mul_col]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1	382	1	val_382	1	2008-04-08	11
-1	382	1	val_382	1	2008-04-08	11
-PREHOOK: query: drop table list_bucketing_mul_col
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: drop table list_bucketing_mul_col
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Output: default@list_bucketing_mul_col
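
Worth noting why per-JDK golden files existed for this test at all: the clearest
JVM-dependent rendering in them is the "Skewed Value to Truncated Path" line,
which Hive prints in Java map iteration order, and that order changed between
JDK 7 and JDK 8; hence the -- JAVA_VERSION_SPECIFIC_OUTPUT marker in the query
file and the duplicate .out variants that HIVE-13549 deletes. The divergence is
visible directly in this diff:

    desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11');
    -- java1.7.out: {[82, val_82]=..., [466, val_466]=..., [287, val_287]=...}
    -- java1.8.out: {[466, val_466]=..., [287, val_287]=..., [82, val_82]=...}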


[60/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.7.out b/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.7.out
deleted file mode 100644
index 52b09d4..0000000
--- a/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.7.out
+++ /dev/null
@@ -1,179 +0,0 @@
-PREHOOK: query: -- Verify that nullable fields properly work
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE test_serializer(string1 STRING,
-                             int1 INT,
-                             tinyint1 TINYINT,
-                             smallint1 SMALLINT,
-                             bigint1 BIGINT,
-                             boolean1 BOOLEAN,
-                             float1 FLOAT,
-                             double1 DOUBLE,
-                             list1 ARRAY<STRING>,
-                             map1 MAP<STRING,INT>,
-                             struct1 STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>,
-                             enum1 STRING,
-                             nullableint INT,
-                             bytes1 BINARY,
-                             fixed1 BINARY)
- ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY ':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
- STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_serializer
-POSTHOOK: query: -- Verify that nullable fields properly work
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE test_serializer(string1 STRING,
-                             int1 INT,
-                             tinyint1 TINYINT,
-                             smallint1 SMALLINT,
-                             bigint1 BIGINT,
-                             boolean1 BOOLEAN,
-                             float1 FLOAT,
-                             double1 DOUBLE,
-                             list1 ARRAY<STRING>,
-                             map1 MAP<STRING,INT>,
-                             struct1 STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>,
-                             enum1 STRING,
-                             nullableint INT,
-                             bytes1 BINARY,
-                             fixed1 BINARY)
- ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY ':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
- STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_serializer
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE test_serializer
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@test_serializer
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE test_serializer
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@test_serializer
-PREHOOK: query: CREATE TABLE as_avro
-  ROW FORMAT
-  SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
-  STORED AS
-  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
-  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
-  TBLPROPERTIES (
-    'avro.schema.literal'='{
-      "namespace": "com.howdy",
-      "name": "some_schema",
-      "type": "record",
-      "fields": [
-        { "name": "string1", "type": ["null", "string"] },
-        { "name": "int1", "type": ["null", "int"] },
-        { "name": "tinyint1", "type": ["null", "int"] },
-        { "name": "smallint1", "type": ["null", "int"] },
-        { "name": "bigint1", "type": ["null", "long"] },
-        { "name": "boolean1", "type": ["null", "boolean"] },
-        { "name": "float1", "type": ["null", "float"] },
-        { "name": "double1", "type": ["null", "double"] },
-        { "name": "list1", "type": ["null", {"type": "array", "items": "string"}] },
-        { "name": "map1", "type": ["null", {"type": "map", "values": "int"}] },
-        { "name": "struct1", "type": ["null", {"type": "record", "name": "struct1_name", "fields": [
-          { "name": "sInt", "type": "int" },
-          { "name": "sBoolean", "type": "boolean" },
-          { "name": "sString", "type": "string" }
-        ]}] },
-        { "name": "enum1", "type": ["null", {"type": "enum", "name": "enum1_values", "symbols": ["BLUE", "RED", "GREEN"]}] },
-        { "name": "nullableint", "type": ["null", "int"] },
-        { "name": "bytes1", "type": ["null", "bytes"] },
-        { "name": "fixed1", "type": ["null", {"type": "fixed", "name": "threebytes", "size": 3}] }
-      ]
-    }'
-  )
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@as_avro
-POSTHOOK: query: CREATE TABLE as_avro
-  ROW FORMAT
-  SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
-  STORED AS
-  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
-  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
-  TBLPROPERTIES (
-    'avro.schema.literal'='{
-      "namespace": "com.howdy",
-      "name": "some_schema",
-      "type": "record",
-      "fields": [
-        { "name": "string1", "type": ["null", "string"] },
-        { "name": "int1", "type": ["null", "int"] },
-        { "name": "tinyint1", "type": ["null", "int"] },
-        { "name": "smallint1", "type": ["null", "int"] },
-        { "name": "bigint1", "type": ["null", "long"] },
-        { "name": "boolean1", "type": ["null", "boolean"] },
-        { "name": "float1", "type": ["null", "float"] },
-        { "name": "double1", "type": ["null", "double"] },
-        { "name": "list1", "type": ["null", {"type": "array", "items": "string"}] },
-        { "name": "map1", "type": ["null", {"type": "map", "values": "int"}] },
-        { "name": "struct1", "type": ["null", {"type": "record", "name": "struct1_name", "fields": [
-          { "name": "sInt", "type": "int" },
-          { "name": "sBoolean", "type": "boolean" },
-          { "name": "sString", "type": "string" }
-        ]}] },
-        { "name": "enum1", "type": ["null", {"type": "enum", "name": "enum1_values", "symbols": ["BLUE", "RED", "GREEN"]}] },
-        { "name": "nullableint", "type": ["null", "int"] },
-        { "name": "bytes1", "type": ["null", "bytes"] },
-        { "name": "fixed1", "type": ["null", {"type": "fixed", "name": "threebytes", "size": 3}] }
-      ]
-    }'
-  )
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@as_avro
-PREHOOK: query: INSERT OVERWRITE TABLE as_avro SELECT * FROM test_serializer
-PREHOOK: type: QUERY
-PREHOOK: Input: default@test_serializer
-PREHOOK: Output: default@as_avro
-POSTHOOK: query: INSERT OVERWRITE TABLE as_avro SELECT * FROM test_serializer
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_serializer
-POSTHOOK: Output: default@as_avro
-POSTHOOK: Lineage: as_avro.bigint1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:bigint1, type:bigint, comment:null), ]
-POSTHOOK: Lineage: as_avro.boolean1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:boolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: as_avro.bytes1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:bytes1, type:binary, comment:null), ]
-POSTHOOK: Lineage: as_avro.double1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: as_avro.enum1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:enum1, type:string, comment:null), ]
-POSTHOOK: Lineage: as_avro.fixed1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:fixed1, type:binary, comment:null), ]
-POSTHOOK: Lineage: as_avro.float1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: as_avro.int1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:int1, type:int, comment:null), ]
-POSTHOOK: Lineage: as_avro.list1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:list1, type:array<string>, comment:null), ]
-POSTHOOK: Lineage: as_avro.map1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:map1, type:map<string,int>, comment:null), ]
-POSTHOOK: Lineage: as_avro.nullableint SIMPLE [(test_serializer)test_serializer.FieldSchema(name:nullableint, type:int, comment:null), ]
-POSTHOOK: Lineage: as_avro.smallint1 EXPRESSION [(test_serializer)test_serializer.FieldSchema(name:smallint1, type:smallint, comment:null), ]
-POSTHOOK: Lineage: as_avro.string1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: as_avro.struct1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:struct1, type:struct<sint:int,sboolean:boolean,sstring:string>, comment:null), ]
-POSTHOOK: Lineage: as_avro.tinyint1 EXPRESSION [(test_serializer)test_serializer.FieldSchema(name:tinyint1, type:tinyint, comment:null), ]
-PREHOOK: query: SELECT * FROM as_avro
-PREHOOK: type: QUERY
-PREHOOK: Input: default@as_avro
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM as_avro
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@as_avro
-#### A masked pattern was here ####
-why hello there	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-another record	98	4	101	9999999	false	99.89	9.0E-8	["beta"]	{"Earth":101}	{"sint":1134,"sboolean":false,"sstring":"wazzup"}	RED	NULL		ef
-third record	45	5	102	999999999	true	89.99	9.0E-14	["alpha","gamma"]	{"Earth":237,"Bob":723}	{"sint":102,"sboolean":false,"sstring":"BNL"}	GREEN	NULL		hi
-NULL	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	NULL	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	NULL	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	NULL	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	NULL	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	NULL	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	NULL	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	NULL	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	NULL	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	NULL	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	NULL	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	NULL	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	NULL		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72	NULL	bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Bob":31,"Control":86}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		NULL

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.8.out b/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.8.out
deleted file mode 100644
index 3690f7b..0000000
--- a/ql/src/test/results/clientpositive/avro_nullable_fields.q.java1.8.out
+++ /dev/null
@@ -1,179 +0,0 @@
-PREHOOK: query: -- Verify that nullable fields properly work
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE test_serializer(string1 STRING,
-                             int1 INT,
-                             tinyint1 TINYINT,
-                             smallint1 SMALLINT,
-                             bigint1 BIGINT,
-                             boolean1 BOOLEAN,
-                             float1 FLOAT,
-                             double1 DOUBLE,
-                             list1 ARRAY<STRING>,
-                             map1 MAP<STRING,INT>,
-                             struct1 STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>,
-                             enum1 STRING,
-                             nullableint INT,
-                             bytes1 BINARY,
-                             fixed1 BINARY)
- ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY ':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
- STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@test_serializer
-POSTHOOK: query: -- Verify that nullable fields properly work
-
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE test_serializer(string1 STRING,
-                             int1 INT,
-                             tinyint1 TINYINT,
-                             smallint1 SMALLINT,
-                             bigint1 BIGINT,
-                             boolean1 BOOLEAN,
-                             float1 FLOAT,
-                             double1 DOUBLE,
-                             list1 ARRAY<STRING>,
-                             map1 MAP<STRING,INT>,
-                             struct1 STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>,
-                             enum1 STRING,
-                             nullableint INT,
-                             bytes1 BINARY,
-                             fixed1 BINARY)
- ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY ':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
- STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@test_serializer
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE test_serializer
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@test_serializer
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE test_serializer
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@test_serializer
-PREHOOK: query: CREATE TABLE as_avro
-  ROW FORMAT
-  SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
-  STORED AS
-  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
-  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
-  TBLPROPERTIES (
-    'avro.schema.literal'='{
-      "namespace": "com.howdy",
-      "name": "some_schema",
-      "type": "record",
-      "fields": [
-        { "name": "string1", "type": ["null", "string"] },
-        { "name": "int1", "type": ["null", "int"] },
-        { "name": "tinyint1", "type": ["null", "int"] },
-        { "name": "smallint1", "type": ["null", "int"] },
-        { "name": "bigint1", "type": ["null", "long"] },
-        { "name": "boolean1", "type": ["null", "boolean"] },
-        { "name": "float1", "type": ["null", "float"] },
-        { "name": "double1", "type": ["null", "double"] },
-        { "name": "list1", "type": ["null", {"type": "array", "items": "string"}] },
-        { "name": "map1", "type": ["null", {"type": "map", "values": "int"}] },
-        { "name": "struct1", "type": ["null", {"type": "record", "name": "struct1_name", "fields": [
-          { "name": "sInt", "type": "int" },
-          { "name": "sBoolean", "type": "boolean" },
-          { "name": "sString", "type": "string" }
-        ]}] },
-        { "name": "enum1", "type": ["null", {"type": "enum", "name": "enum1_values", "symbols": ["BLUE", "RED", "GREEN"]}] },
-        { "name": "nullableint", "type": ["null", "int"] },
-        { "name": "bytes1", "type": ["null", "bytes"] },
-        { "name": "fixed1", "type": ["null", {"type": "fixed", "name": "threebytes", "size": 3}] }
-      ]
-    }'
-  )
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@as_avro
-POSTHOOK: query: CREATE TABLE as_avro
-  ROW FORMAT
-  SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
-  STORED AS
-  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
-  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
-  TBLPROPERTIES (
-    'avro.schema.literal'='{
-      "namespace": "com.howdy",
-      "name": "some_schema",
-      "type": "record",
-      "fields": [
-        { "name": "string1", "type": ["null", "string"] },
-        { "name": "int1", "type": ["null", "int"] },
-        { "name": "tinyint1", "type": ["null", "int"] },
-        { "name": "smallint1", "type": ["null", "int"] },
-        { "name": "bigint1", "type": ["null", "long"] },
-        { "name": "boolean1", "type": ["null", "boolean"] },
-        { "name": "float1", "type": ["null", "float"] },
-        { "name": "double1", "type": ["null", "double"] },
-        { "name": "list1", "type": ["null", {"type": "array", "items": "string"}] },
-        { "name": "map1", "type": ["null", {"type": "map", "values": "int"}] },
-        { "name": "struct1", "type": ["null", {"type": "record", "name": "struct1_name", "fields": [
-          { "name": "sInt", "type": "int" },
-          { "name": "sBoolean", "type": "boolean" },
-          { "name": "sString", "type": "string" }
-        ]}] },
-        { "name": "enum1", "type": ["null", {"type": "enum", "name": "enum1_values", "symbols": ["BLUE", "RED", "GREEN"]}] },
-        { "name": "nullableint", "type": ["null", "int"] },
-        { "name": "bytes1", "type": ["null", "bytes"] },
-        { "name": "fixed1", "type": ["null", {"type": "fixed", "name": "threebytes", "size": 3}] }
-      ]
-    }'
-  )
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@as_avro
-PREHOOK: query: INSERT OVERWRITE TABLE as_avro SELECT * FROM test_serializer
-PREHOOK: type: QUERY
-PREHOOK: Input: default@test_serializer
-PREHOOK: Output: default@as_avro
-POSTHOOK: query: INSERT OVERWRITE TABLE as_avro SELECT * FROM test_serializer
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@test_serializer
-POSTHOOK: Output: default@as_avro
-POSTHOOK: Lineage: as_avro.bigint1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:bigint1, type:bigint, comment:null), ]
-POSTHOOK: Lineage: as_avro.boolean1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:boolean1, type:boolean, comment:null), ]
-POSTHOOK: Lineage: as_avro.bytes1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:bytes1, type:binary, comment:null), ]
-POSTHOOK: Lineage: as_avro.double1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:double1, type:double, comment:null), ]
-POSTHOOK: Lineage: as_avro.enum1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:enum1, type:string, comment:null), ]
-POSTHOOK: Lineage: as_avro.fixed1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:fixed1, type:binary, comment:null), ]
-POSTHOOK: Lineage: as_avro.float1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:float1, type:float, comment:null), ]
-POSTHOOK: Lineage: as_avro.int1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:int1, type:int, comment:null), ]
-POSTHOOK: Lineage: as_avro.list1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:list1, type:array<string>, comment:null), ]
-POSTHOOK: Lineage: as_avro.map1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:map1, type:map<string,int>, comment:null), ]
-POSTHOOK: Lineage: as_avro.nullableint SIMPLE [(test_serializer)test_serializer.FieldSchema(name:nullableint, type:int, comment:null), ]
-POSTHOOK: Lineage: as_avro.smallint1 EXPRESSION [(test_serializer)test_serializer.FieldSchema(name:smallint1, type:smallint, comment:null), ]
-POSTHOOK: Lineage: as_avro.string1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:string1, type:string, comment:null), ]
-POSTHOOK: Lineage: as_avro.struct1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:struct1, type:struct<sint:int,sboolean:boolean,sstring:string>, comment:null), ]
-POSTHOOK: Lineage: as_avro.tinyint1 EXPRESSION [(test_serializer)test_serializer.FieldSchema(name:tinyint1, type:tinyint, comment:null), ]
-PREHOOK: query: SELECT * FROM as_avro
-PREHOOK: type: QUERY
-PREHOOK: Input: default@as_avro
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM as_avro
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@as_avro
-#### A masked pattern was here ####
-why hello there	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-another record	98	4	101	9999999	false	99.89	9.0E-8	["beta"]	{"Earth":101}	{"sint":1134,"sboolean":false,"sstring":"wazzup"}	RED	NULL		ef
-third record	45	5	102	999999999	true	89.99	9.0E-14	["alpha","gamma"]	{"Earth":237,"Bob":723}	{"sint":102,"sboolean":false,"sstring":"BNL"}	GREEN	NULL		hi
-NULL	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	NULL	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	NULL	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	NULL	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	NULL	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	NULL	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	NULL	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	NULL	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	NULL	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	NULL	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	NULL	BLUE	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	NULL	72		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	NULL		bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72	NULL	bc
-string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		NULL

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_nullable_fields.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_nullable_fields.q.out b/ql/src/test/results/clientpositive/avro_nullable_fields.q.out
new file mode 100644
index 0000000..2272b34
--- /dev/null
+++ b/ql/src/test/results/clientpositive/avro_nullable_fields.q.out
@@ -0,0 +1,177 @@
+PREHOOK: query: -- Verify that nullable fields properly work
+
+
+CREATE TABLE test_serializer(string1 STRING,
+                             int1 INT,
+                             tinyint1 TINYINT,
+                             smallint1 SMALLINT,
+                             bigint1 BIGINT,
+                             boolean1 BOOLEAN,
+                             float1 FLOAT,
+                             double1 DOUBLE,
+                             list1 ARRAY<STRING>,
+                             map1 MAP<STRING,INT>,
+                             struct1 STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>,
+                             enum1 STRING,
+                             nullableint INT,
+                             bytes1 BINARY,
+                             fixed1 BINARY)
+ ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY ':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
+ STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@test_serializer
+POSTHOOK: query: -- Verify that nullable fields properly work
+
+
+CREATE TABLE test_serializer(string1 STRING,
+                             int1 INT,
+                             tinyint1 TINYINT,
+                             smallint1 SMALLINT,
+                             bigint1 BIGINT,
+                             boolean1 BOOLEAN,
+                             float1 FLOAT,
+                             double1 DOUBLE,
+                             list1 ARRAY<STRING>,
+                             map1 MAP<STRING,INT>,
+                             struct1 STRUCT<sint:INT,sboolean:BOOLEAN,sstring:STRING>,
+                             enum1 STRING,
+                             nullableint INT,
+                             bytes1 BINARY,
+                             fixed1 BINARY)
+ ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY ':' MAP KEYS TERMINATED BY '#' LINES TERMINATED BY '\n'
+ STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_serializer
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE test_serializer
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@test_serializer
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/csv.txt' INTO TABLE test_serializer
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@test_serializer
+PREHOOK: query: CREATE TABLE as_avro
+  ROW FORMAT
+  SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
+  STORED AS
+  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
+  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
+  TBLPROPERTIES (
+    'avro.schema.literal'='{
+      "namespace": "com.howdy",
+      "name": "some_schema",
+      "type": "record",
+      "fields": [
+        { "name": "string1", "type": ["null", "string"] },
+        { "name": "int1", "type": ["null", "int"] },
+        { "name": "tinyint1", "type": ["null", "int"] },
+        { "name": "smallint1", "type": ["null", "int"] },
+        { "name": "bigint1", "type": ["null", "long"] },
+        { "name": "boolean1", "type": ["null", "boolean"] },
+        { "name": "float1", "type": ["null", "float"] },
+        { "name": "double1", "type": ["null", "double"] },
+        { "name": "list1", "type": ["null", {"type": "array", "items": "string"}] },
+        { "name": "map1", "type": ["null", {"type": "map", "values": "int"}] },
+        { "name": "struct1", "type": ["null", {"type": "record", "name": "struct1_name", "fields": [
+          { "name": "sInt", "type": "int" },
+          { "name": "sBoolean", "type": "boolean" },
+          { "name": "sString", "type": "string" }
+        ]}] },
+        { "name": "enum1", "type": ["null", {"type": "enum", "name": "enum1_values", "symbols": ["BLUE", "RED", "GREEN"]}] },
+        { "name": "nullableint", "type": ["null", "int"] },
+        { "name": "bytes1", "type": ["null", "bytes"] },
+        { "name": "fixed1", "type": ["null", {"type": "fixed", "name": "threebytes", "size": 3}] }
+      ]
+    }'
+  )
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@as_avro
+POSTHOOK: query: CREATE TABLE as_avro
+  ROW FORMAT
+  SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
+  STORED AS
+  INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
+  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
+  TBLPROPERTIES (
+    'avro.schema.literal'='{
+      "namespace": "com.howdy",
+      "name": "some_schema",
+      "type": "record",
+      "fields": [
+        { "name": "string1", "type": ["null", "string"] },
+        { "name": "int1", "type": ["null", "int"] },
+        { "name": "tinyint1", "type": ["null", "int"] },
+        { "name": "smallint1", "type": ["null", "int"] },
+        { "name": "bigint1", "type": ["null", "long"] },
+        { "name": "boolean1", "type": ["null", "boolean"] },
+        { "name": "float1", "type": ["null", "float"] },
+        { "name": "double1", "type": ["null", "double"] },
+        { "name": "list1", "type": ["null", {"type": "array", "items": "string"}] },
+        { "name": "map1", "type": ["null", {"type": "map", "values": "int"}] },
+        { "name": "struct1", "type": ["null", {"type": "record", "name": "struct1_name", "fields": [
+          { "name": "sInt", "type": "int" },
+          { "name": "sBoolean", "type": "boolean" },
+          { "name": "sString", "type": "string" }
+        ]}] },
+        { "name": "enum1", "type": ["null", {"type": "enum", "name": "enum1_values", "symbols": ["BLUE", "RED", "GREEN"]}] },
+        { "name": "nullableint", "type": ["null", "int"] },
+        { "name": "bytes1", "type": ["null", "bytes"] },
+        { "name": "fixed1", "type": ["null", {"type": "fixed", "name": "threebytes", "size": 3}] }
+      ]
+    }'
+  )
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@as_avro
+PREHOOK: query: INSERT OVERWRITE TABLE as_avro SELECT * FROM test_serializer
+PREHOOK: type: QUERY
+PREHOOK: Input: default@test_serializer
+PREHOOK: Output: default@as_avro
+POSTHOOK: query: INSERT OVERWRITE TABLE as_avro SELECT * FROM test_serializer
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@test_serializer
+POSTHOOK: Output: default@as_avro
+POSTHOOK: Lineage: as_avro.bigint1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:bigint1, type:bigint, comment:null), ]
+POSTHOOK: Lineage: as_avro.boolean1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:boolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: as_avro.bytes1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:bytes1, type:binary, comment:null), ]
+POSTHOOK: Lineage: as_avro.double1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:double1, type:double, comment:null), ]
+POSTHOOK: Lineage: as_avro.enum1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:enum1, type:string, comment:null), ]
+POSTHOOK: Lineage: as_avro.fixed1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:fixed1, type:binary, comment:null), ]
+POSTHOOK: Lineage: as_avro.float1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:float1, type:float, comment:null), ]
+POSTHOOK: Lineage: as_avro.int1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:int1, type:int, comment:null), ]
+POSTHOOK: Lineage: as_avro.list1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:list1, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: as_avro.map1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:map1, type:map<string,int>, comment:null), ]
+POSTHOOK: Lineage: as_avro.nullableint SIMPLE [(test_serializer)test_serializer.FieldSchema(name:nullableint, type:int, comment:null), ]
+POSTHOOK: Lineage: as_avro.smallint1 EXPRESSION [(test_serializer)test_serializer.FieldSchema(name:smallint1, type:smallint, comment:null), ]
+POSTHOOK: Lineage: as_avro.string1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:string1, type:string, comment:null), ]
+POSTHOOK: Lineage: as_avro.struct1 SIMPLE [(test_serializer)test_serializer.FieldSchema(name:struct1, type:struct<sint:int,sboolean:boolean,sstring:string>, comment:null), ]
+POSTHOOK: Lineage: as_avro.tinyint1 EXPRESSION [(test_serializer)test_serializer.FieldSchema(name:tinyint1, type:tinyint, comment:null), ]
+PREHOOK: query: SELECT * FROM as_avro
+PREHOOK: type: QUERY
+PREHOOK: Input: default@as_avro
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM as_avro
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@as_avro
+#### A masked pattern was here ####
+why hello there	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+another record	98	4	101	9999999	false	99.89	9.0E-8	["beta"]	{"Earth":101}	{"sint":1134,"sboolean":false,"sstring":"wazzup"}	RED	NULL		ef
+third record	45	5	102	999999999	true	89.99	9.0E-14	["alpha","gamma"]	{"Earth":237,"Bob":723}	{"sint":102,"sboolean":false,"sstring":"BNL"}	GREEN	NULL		hi
+NULL	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	NULL	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	NULL	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	3	NULL	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	3	100	NULL	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	3	100	1412341	NULL	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	3	100	1412341	true	NULL	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	3	100	1412341	true	42.43	NULL	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	3	100	1412341	true	42.43	85.23423424	NULL	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	NULL	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		bc
+string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	NULL	BLUE	72		bc
+string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	NULL	72		bc
+string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	NULL		bc
+string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72	NULL	bc
+string	42	3	100	1412341	true	42.43	85.23423424	["alpha","beta","gamma"]	{"Earth":42,"Control":86,"Bob":31}	{"sint":17,"sboolean":true,"sstring":"Abe Linkedin"}	BLUE	72		NULL
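
The consolidated avro_nullable_fields.q.out above keeps the Java 8 map ordering and drops the -- JAVA_VERSION_SPECIFIC_OUTPUT marker from the recorded queries, so a single golden file now covers the test. Had the output needed to stay stable across JVMs instead, a common normalization is to sort map keys before rendering; a hypothetical sketch of that alternative (not the approach taken here, which simply consolidates the files):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class DeterministicMapPrint {
        public static void main(String[] args) {
            Map<String, Integer> m = new HashMap<>();
            m.put("Earth", 42);
            m.put("Bob", 31);
            m.put("Control", 86);
            // TreeMap iterates in sorted key order on every JVM, so the
            // printed form is always {Bob=31, Control=86, Earth=42}.
            System.out.println(new TreeMap<>(m));
        }
    }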

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_timestamp.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_timestamp.q.java1.7.out b/ql/src/test/results/clientpositive/avro_timestamp.q.java1.7.out
deleted file mode 100644
index d2d3b7c..0000000
--- a/ql/src/test/results/clientpositive/avro_timestamp.q.java1.7.out
+++ /dev/null
@@ -1,134 +0,0 @@
-PREHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows.
--- EXCLUDE_OS_WINDOWS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_timestamp_staging
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows.
--- EXCLUDE_OS_WINDOWS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_timestamp_staging
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_timestamp
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_timestamp
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_timestamp_casts
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_timestamp_casts
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_timestamp_staging (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_timestamp_staging
-POSTHOOK: query: CREATE TABLE avro_timestamp_staging (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_timestamp_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_timestamp.txt' OVERWRITE INTO TABLE avro_timestamp_staging
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_timestamp_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_timestamp.txt' OVERWRITE INTO TABLE avro_timestamp_staging
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_timestamp_staging
-PREHOOK: query: CREATE TABLE avro_timestamp (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-  PARTITIONED BY (p1 int, p2 timestamp)
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-  STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_timestamp
-POSTHOOK: query: CREATE TABLE avro_timestamp (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-  PARTITIONED BY (p1 int, p2 timestamp)
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-  STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_timestamp
-PREHOOK: query: INSERT OVERWRITE TABLE avro_timestamp PARTITION(p1=2, p2='2014-09-26 07:08:09.123') SELECT * FROM avro_timestamp_staging
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp_staging
-PREHOOK: Output: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-POSTHOOK: query: INSERT OVERWRITE TABLE avro_timestamp PARTITION(p1=2, p2='2014-09-26 07:08:09.123') SELECT * FROM avro_timestamp_staging
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp_staging
-POSTHOOK: Output: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).d SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:d, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).l1 SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:l1, type:array<timestamp>, comment:null), ]
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).m1 SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:m1, type:map<string,timestamp>, comment:null), ]
-PREHOOK: query: SELECT * FROM avro_timestamp
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123	{"foo":"1980-12-16 07:08:09.123","bar":"1998-05-07 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-1947-02-11 07:08:09.123	{"baz":"1921-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-PREHOOK: query: SELECT d, COUNT(d) FROM avro_timestamp GROUP BY d
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT d, COUNT(d) FROM avro_timestamp GROUP BY d
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-1947-02-11 07:08:09.123	1
-2012-02-21 07:08:09.123	1
-2014-02-11 07:08:09.123	1
-8200-02-11 07:08:09.123	1
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d!='1947-02-11 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d!='1947-02-11 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123	{"foo":"1980-12-16 07:08:09.123","bar":"1998-05-07 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d<'2014-12-21 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d<'2014-12-21 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123	{"foo":"1980-12-16 07:08:09.123","bar":"1998-05-07 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-1947-02-11 07:08:09.123	{"baz":"1921-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d>'8000-12-01 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d>'8000-12-01 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_timestamp.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_timestamp.q.java1.8.out b/ql/src/test/results/clientpositive/avro_timestamp.q.java1.8.out
deleted file mode 100644
index 4c38347..0000000
--- a/ql/src/test/results/clientpositive/avro_timestamp.q.java1.8.out
+++ /dev/null
@@ -1,134 +0,0 @@
-PREHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows.
--- EXCLUDE_OS_WINDOWS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_timestamp_staging
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows.
--- EXCLUDE_OS_WINDOWS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE avro_timestamp_staging
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_timestamp
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_timestamp
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE avro_timestamp_casts
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE avro_timestamp_casts
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE avro_timestamp_staging (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_timestamp_staging
-POSTHOOK: query: CREATE TABLE avro_timestamp_staging (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-   STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_timestamp_staging
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_timestamp.txt' OVERWRITE INTO TABLE avro_timestamp_staging
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@avro_timestamp_staging
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_timestamp.txt' OVERWRITE INTO TABLE avro_timestamp_staging
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@avro_timestamp_staging
-PREHOOK: query: CREATE TABLE avro_timestamp (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-  PARTITIONED BY (p1 int, p2 timestamp)
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-  STORED AS AVRO
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@avro_timestamp
-POSTHOOK: query: CREATE TABLE avro_timestamp (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
-  PARTITIONED BY (p1 int, p2 timestamp)
-  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
-  STORED AS AVRO
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@avro_timestamp
-PREHOOK: query: INSERT OVERWRITE TABLE avro_timestamp PARTITION(p1=2, p2='2014-09-26 07:08:09.123') SELECT * FROM avro_timestamp_staging
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp_staging
-PREHOOK: Output: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-POSTHOOK: query: INSERT OVERWRITE TABLE avro_timestamp PARTITION(p1=2, p2='2014-09-26 07:08:09.123') SELECT * FROM avro_timestamp_staging
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp_staging
-POSTHOOK: Output: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).d SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:d, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).l1 SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:l1, type:array<timestamp>, comment:null), ]
-POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).m1 SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:m1, type:map<string,timestamp>, comment:null), ]
-PREHOOK: query: SELECT * FROM avro_timestamp
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123	{"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-1947-02-11 07:08:09.123	{"baz":"1921-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-PREHOOK: query: SELECT d, COUNT(d) FROM avro_timestamp GROUP BY d
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT d, COUNT(d) FROM avro_timestamp GROUP BY d
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-1947-02-11 07:08:09.123	1
-2012-02-21 07:08:09.123	1
-2014-02-11 07:08:09.123	1
-8200-02-11 07:08:09.123	1
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d!='1947-02-11 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d!='1947-02-11 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123	{"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d<'2014-12-21 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d<'2014-12-21 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-2012-02-21 07:08:09.123	{"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-1947-02-11 07:08:09.123	{"baz":"1921-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
-PREHOOK: query: SELECT * FROM avro_timestamp WHERE d>'8000-12-01 07:08:09.123'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@avro_timestamp
-PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d>'8000-12-01 07:08:09.123'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@avro_timestamp
-POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
-#### A masked pattern was here ####
-8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/avro_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_timestamp.q.out b/ql/src/test/results/clientpositive/avro_timestamp.q.out
new file mode 100644
index 0000000..868807a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/avro_timestamp.q.out
@@ -0,0 +1,132 @@
+PREHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows.
+-- EXCLUDE_OS_WINDOWS
+
+DROP TABLE avro_timestamp_staging
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows.
+-- EXCLUDE_OS_WINDOWS
+
+DROP TABLE avro_timestamp_staging
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE avro_timestamp
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE avro_timestamp
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE avro_timestamp_casts
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE avro_timestamp_casts
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE avro_timestamp_staging (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
+   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
+   STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_timestamp_staging
+POSTHOOK: query: CREATE TABLE avro_timestamp_staging (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
+   ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+   COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
+   STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@avro_timestamp_staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_timestamp.txt' OVERWRITE INTO TABLE avro_timestamp_staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@avro_timestamp_staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_timestamp.txt' OVERWRITE INTO TABLE avro_timestamp_staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@avro_timestamp_staging
+PREHOOK: query: CREATE TABLE avro_timestamp (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
+  PARTITIONED BY (p1 int, p2 timestamp)
+  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
+  STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_timestamp
+POSTHOOK: query: CREATE TABLE avro_timestamp (d timestamp, m1 map<string, timestamp>, l1 array<timestamp>)
+  PARTITIONED BY (p1 int, p2 timestamp)
+  ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+  COLLECTION ITEMS TERMINATED BY ',' MAP KEYS TERMINATED BY ':'
+  STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@avro_timestamp
+PREHOOK: query: INSERT OVERWRITE TABLE avro_timestamp PARTITION(p1=2, p2='2014-09-26 07:08:09.123') SELECT * FROM avro_timestamp_staging
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_timestamp_staging
+PREHOOK: Output: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+POSTHOOK: query: INSERT OVERWRITE TABLE avro_timestamp PARTITION(p1=2, p2='2014-09-26 07:08:09.123') SELECT * FROM avro_timestamp_staging
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_timestamp_staging
+POSTHOOK: Output: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).d SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:d, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).l1 SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:l1, type:array<timestamp>, comment:null), ]
+POSTHOOK: Lineage: avro_timestamp PARTITION(p1=2,p2=2014-09-26 07:08:09.123).m1 SIMPLE [(avro_timestamp_staging)avro_timestamp_staging.FieldSchema(name:m1, type:map<string,timestamp>, comment:null), ]
+PREHOOK: query: SELECT * FROM avro_timestamp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_timestamp
+PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_timestamp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_timestamp
+POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+2012-02-21 07:08:09.123	{"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+1947-02-11 07:08:09.123	{"baz":"1921-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+PREHOOK: query: SELECT d, COUNT(d) FROM avro_timestamp GROUP BY d
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_timestamp
+PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT d, COUNT(d) FROM avro_timestamp GROUP BY d
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_timestamp
+POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+1947-02-11 07:08:09.123	1
+2012-02-21 07:08:09.123	1
+2014-02-11 07:08:09.123	1
+8200-02-11 07:08:09.123	1
+PREHOOK: query: SELECT * FROM avro_timestamp WHERE d!='1947-02-11 07:08:09.123'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_timestamp
+PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d!='1947-02-11 07:08:09.123'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_timestamp
+POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+2012-02-21 07:08:09.123	{"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+PREHOOK: query: SELECT * FROM avro_timestamp WHERE d<'2014-12-21 07:08:09.123'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_timestamp
+PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d<'2014-12-21 07:08:09.123'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_timestamp
+POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+2012-02-21 07:08:09.123	{"bar":"1998-05-07 07:08:09.123","foo":"1980-12-16 07:08:09.123"}	["2011-09-04 07:08:09.123","2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+2014-02-11 07:08:09.123	{"baz":"1981-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+1947-02-11 07:08:09.123	{"baz":"1921-12-16 07:08:09.123"}	["2011-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
+PREHOOK: query: SELECT * FROM avro_timestamp WHERE d>'8000-12-01 07:08:09.123'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_timestamp
+PREHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM avro_timestamp WHERE d>'8000-12-01 07:08:09.123'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_timestamp
+POSTHOOK: Input: default@avro_timestamp@p1=2/p2=2014-09-26 07%3A08%3A09.123
+#### A masked pattern was here ####
+8200-02-11 07:08:09.123	{"baz":"6981-12-16 07:08:09.123"}	["1039-09-05 07:08:09.123"]	2	2014-09-26 07:08:09.123
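
For context, a minimal sketch of DDL that would produce output of this shape.
The table layout is inferred from the PREHOOK/POSTHOOK and lineage lines
above, not taken from the actual avro_timestamp.q file, so treat every name
and clause here as an assumption:

  -- Hypothetical reconstruction of the test tables.
  CREATE TABLE avro_timestamp_staging (
    d  timestamp,
    m1 map<string,timestamp>,
    l1 array<timestamp>);

  CREATE TABLE avro_timestamp (
    d  timestamp,
    m1 map<string,timestamp>,
    l1 array<timestamp>)
  PARTITIONED BY (p1 int, p2 string)
  STORED AS AVRO;

  INSERT OVERWRITE TABLE avro_timestamp
  PARTITION (p1=2, p2='2014-09-26 07:08:09.123')
  SELECT * FROM avro_timestamp_staging;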


[06/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
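
All of the golden-file updates under this message follow from the subject
line: with HIVE-13068, Hive no longer re-runs its own ConstantPropagate
optimizer over a plan that CBO (Calcite) has already optimized, so constant
handling in the plans now reflects what CBO emitted. A hedged, schematic
illustration (not a query from this patch):

  -- Hypothetical query for illustration only.
  EXPLAIN SELECT key, 1, count(1) FROM t GROUP BY key, 1;

  -- Before: Hive's ConstantPropagate folded the literal back into
  -- downstream operators, e.g.
  --   keys: _col0 (type: string), 1 (type: int)
  -- After: the plan keeps the reference the CBO plan produced, e.g.
  --   keys: _col0 (type: string), _col1 (type: int)

The visible side effects in the diffs below include shifted _colN numbering,
operator ids that are one lower in cross-product warnings, and lineage
entries that now resolve to real source columns (or to no source at all for
constants).
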
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
index 8163773..86c5bd7 100644
--- a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
@@ -908,46 +908,46 @@ STAGE PLANS:
                 outputColumnNames: _col2, _col4, _col5, _col6
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Select Operator
-                  expressions: _col6 (type: string), _col5 (type: int), _col4 (type: int), _col2 (type: int)
-                  outputColumnNames: _col0, _col1, _col2, _col4
+                  expressions: _col4 (type: int), _col5 (type: int), _col6 (type: string), _col2 (type: int)
+                  outputColumnNames: _col4, _col5, _col6, _col2
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Group By Operator
-                    aggregations: avg(_col4), stddev_samp(_col4)
-                    keys: _col0 (type: string), _col1 (type: int), _col2 (type: int), 4 (type: int)
+                    aggregations: stddev_samp(_col2), avg(_col2)
+                    keys: _col4 (type: int), _col5 (type: int), _col6 (type: string)
                     mode: hash
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), 4 (type: int)
-                      sort order: ++++
-                      Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: int), 4 (type: int)
+                      key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
+                      sort order: +++
+                      Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: string)
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      value expressions: _col4 (type: struct<count:bigint,sum:double,input:int>), _col5 (type: struct<count:bigint,sum:double,variance:double>)
+                      value expressions: _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,input:int>)
         Reducer 15 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: avg(VALUE._col0), stddev_samp(VALUE._col1)
-                keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: int), 4 (type: int)
+                aggregations: stddev_samp(VALUE._col0), avg(VALUE._col1)
+                keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: string)
                 mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), _col5 (type: double)
-                  outputColumnNames: _col1, _col2, _col4, _col5
+                  expressions: _col1 (type: int), _col0 (type: int), _col3 (type: double), _col4 (type: double)
+                  outputColumnNames: _col1, _col2, _col3, _col4
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col5 / _col4) > 1.0)) END (type: boolean)
+                    predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col5 / _col4)) END (type: double)
-                      outputColumnNames: _col1, _col2, _col4, _col5
+                      expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col3 / _col4)) END (type: double)
+                      outputColumnNames: _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col2 (type: int), _col1 (type: int)
                         sort order: ++
                         Map-reduce partition columns: _col2 (type: int), _col1 (type: int)
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                        value expressions: _col4 (type: double), _col5 (type: double)
+                        value expressions: _col3 (type: double), _col4 (type: double)
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -991,46 +991,46 @@ STAGE PLANS:
                 outputColumnNames: _col2, _col4, _col5, _col6
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Select Operator
-                  expressions: _col6 (type: string), _col5 (type: int), _col4 (type: int), _col2 (type: int)
-                  outputColumnNames: _col0, _col1, _col2, _col4
+                  expressions: _col4 (type: int), _col5 (type: int), _col6 (type: string), _col2 (type: int)
+                  outputColumnNames: _col4, _col5, _col6, _col2
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Group By Operator
-                    aggregations: avg(_col4), stddev_samp(_col4)
-                    keys: _col0 (type: string), _col1 (type: int), _col2 (type: int), 3 (type: int)
+                    aggregations: stddev_samp(_col2), avg(_col2)
+                    keys: _col4 (type: int), _col5 (type: int), _col6 (type: string)
                     mode: hash
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), 3 (type: int)
-                      sort order: ++++
-                      Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: int), 3 (type: int)
+                      key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string)
+                      sort order: +++
+                      Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: string)
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      value expressions: _col4 (type: struct<count:bigint,sum:double,input:int>), _col5 (type: struct<count:bigint,sum:double,variance:double>)
+                      value expressions: _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,input:int>)
         Reducer 5 
             Reduce Operator Tree:
               Group By Operator
-                aggregations: avg(VALUE._col0), stddev_samp(VALUE._col1)
-                keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: int), 3 (type: int)
+                aggregations: stddev_samp(VALUE._col0), avg(VALUE._col1)
+                keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: string)
                 mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), _col5 (type: double)
-                  outputColumnNames: _col1, _col2, _col4, _col5
+                  expressions: _col1 (type: int), _col0 (type: int), _col3 (type: double), _col4 (type: double)
+                  outputColumnNames: _col1, _col2, _col3, _col4
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col5 / _col4) > 1.0)) END (type: boolean)
+                    predicate: CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
-                      expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col5 / _col4)) END (type: double)
-                      outputColumnNames: _col1, _col2, _col4, _col5
+                      expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), CASE WHEN ((_col4 = 0.0)) THEN (null) ELSE ((_col3 / _col4)) END (type: double)
+                      outputColumnNames: _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col2 (type: int), _col1 (type: int)
                         sort order: ++
                         Map-reduce partition columns: _col2 (type: int), _col1 (type: int)
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                        value expressions: _col4 (type: double), _col5 (type: double)
+                        value expressions: _col3 (type: double), _col4 (type: double)
         Reducer 6 
             Reduce Operator Tree:
               Join Operator
@@ -1039,10 +1039,10 @@ STAGE PLANS:
                 keys:
                   0 _col2 (type: int), _col1 (type: int)
                   1 _col2 (type: int), _col1 (type: int)
-                outputColumnNames: _col1, _col2, _col4, _col5, _col7, _col8, _col10, _col11
+                outputColumnNames: _col1, _col2, _col3, _col4, _col6, _col7, _col8, _col9
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: int), _col2 (type: int), _col4 (type: double), _col5 (type: double), _col7 (type: int), _col8 (type: int), _col10 (type: double), _col11 (type: double)
+                  expressions: _col1 (type: int), _col2 (type: int), _col3 (type: double), _col4 (type: double), _col6 (type: int), _col7 (type: int), _col8 (type: double), _col9 (type: double)
                   outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col6, _col8, _col9
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Reduce Output Operator
@@ -1053,7 +1053,7 @@ STAGE PLANS:
         Reducer 7 
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), 3 (type: int), KEY.reducesinkkey3 (type: double), KEY.reducesinkkey4 (type: double), VALUE._col0 (type: int), VALUE._col1 (type: int), 4 (type: int), KEY.reducesinkkey6 (type: double), KEY.reducesinkkey7 (type: double)
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: double), KEY.reducesinkkey4 (type: double), VALUE._col0 (type: int), VALUE._col1 (type: int), KEY.reducesinkkey5 (type: int), KEY.reducesinkkey6 (type: double), KEY.reducesinkkey7 (type: double)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
index a21ea9c..4c4ae03 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
@@ -836,16 +836,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col1
+                    outputColumnNames: key
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
-                      keys: 1 (type: int), _col1 (type: string)
+                      aggregations: count(key)
+                      keys: key (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: 1 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
+                        expressions: 1 (type: int), UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                         outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
@@ -974,7 +974,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
+POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM outputTbl3
@@ -1028,22 +1028,22 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: key, val
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
-                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                      aggregations: count(val)
+                      keys: key (type: string), val (type: string)
                       mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string), 1 (type: int), _col2 (type: string)
-                        null sort order: aaa
-                        sort order: +++
-                        Map-reduce partition columns: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        null sort order: aa
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
-                        value expressions: _col3 (type: bigint)
+                        value expressions: _col2 (type: bigint)
                         auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
@@ -1103,12 +1103,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: string), 1 (type: int), KEY._col2 (type: string)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -1184,7 +1184,7 @@ SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
@@ -3074,16 +3074,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: key, val
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
-                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                      aggregations: count(val)
+                      keys: key (type: string), val (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
@@ -3212,7 +3212,7 @@ SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -3268,16 +3268,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: key, val
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
-                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string), 2 (type: int)
+                      aggregations: count(val)
+                      keys: key (type: string), val (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), 2 (type: int), UDFToInteger(_col4) (type: int)
+                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), 2 (type: int), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
@@ -3406,7 +3406,7 @@ SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
+POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -3459,16 +3459,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                      keys: _col0 (type: string), _col1 (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
@@ -3657,16 +3657,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      keys: _col0 (type: string), 2 (type: int), _col2 (type: string)
+                      keys: _col0 (type: string), _col1 (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToInteger(_col0) (type: int), 2 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                        expressions: UDFToInteger(_col0) (type: int), 2 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
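
Two changes recur throughout this file (and in the groupby_sort_skew_1_23
diff that follows): literal grouping columns such as the 1 in
GROUP BY key, 1, val are no longer carried as group-by/shuffle keys but are
re-attached as constants in the final Select, and the lineage of the count
column now points at a real source column instead of (t1)t1.null. A sketch
of the pattern, with the insert syntax assumed:

  -- Query shape taken from the POSTHOOK lines in this diff.
  INSERT OVERWRITE TABLE outputTbl4
  SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;

  -- group-by keys before: _col0 (string), 1 (int), _col2 (string)
  -- group-by keys after:  key (string), val (string)  -- constant dropped
  -- aggregation before:   count(1)
  -- aggregation after:    count(val)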

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
index d0f5952..e575e97 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
@@ -855,16 +855,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col1
+                    outputColumnNames: key
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
-                      keys: 1 (type: int), _col1 (type: string)
+                      aggregations: count(key)
+                      keys: key (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: 1 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
+                        expressions: 1 (type: int), UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                         outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
@@ -993,7 +993,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
+POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM outputTbl3
@@ -1048,22 +1048,22 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: key, val
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
-                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                      aggregations: count(val)
+                      keys: key (type: string), val (type: string)
                       mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string), 1 (type: int), _col2 (type: string)
-                        null sort order: aaa
-                        sort order: +++
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        null sort order: aa
+                        sort order: ++
                         Map-reduce partition columns: rand() (type: double)
                         Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
-                        value expressions: _col3 (type: bigint)
+                        value expressions: _col2 (type: bigint)
                         auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
@@ -1123,30 +1123,30 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: string), 1 (type: int), KEY._col2 (type: string)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: partials
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), 1 (type: int), _col2 (type: string)
-                  null sort order: aaa
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  null sort order: aa
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   tag: -1
-                  value expressions: _col3 (type: bigint)
+                  value expressions: _col2 (type: bigint)
                   auto parallelism: false
         Reducer 3 
             Needs Tagging: false
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                keys: KEY._col0 (type: string), 1 (type: int), KEY._col2 (type: string)
+                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -1222,7 +1222,7 @@ SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
@@ -3207,16 +3207,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: key, val
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
-                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                      aggregations: count(val)
+                      keys: key (type: string), val (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
@@ -3345,7 +3345,7 @@ SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -3401,16 +3401,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: key, val
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
-                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string), 2 (type: int)
+                      aggregations: count(val)
+                      keys: key (type: string), val (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), 2 (type: int), UDFToInteger(_col4) (type: int)
+                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), 2 (type: int), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
@@ -3539,7 +3539,7 @@ SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
+POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -3592,16 +3592,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                      keys: _col0 (type: string), _col1 (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                        expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator
@@ -3790,16 +3790,16 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col2
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
-                      keys: _col0 (type: string), 2 (type: int), _col2 (type: string)
+                      keys: _col0 (type: string), _col1 (type: string)
                       mode: final
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToInteger(_col0) (type: int), 2 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                        expressions: UDFToInteger(_col0) (type: int), 2 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
                         Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                         File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join38.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join38.q.out b/ql/src/test/results/clientpositive/spark/join38.q.out
index 71d59e2..5177dca 100644
--- a/ql/src/test/results/clientpositive/spark/join38.q.out
+++ b/ql/src/test/results/clientpositive/spark/join38.q.out
@@ -15,17 +15,17 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@tmp
 POSTHOOK: Lineage: tmp.col0 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col10 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col11 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col4 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col5 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col6 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col7 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col8 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col9 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp.col1 SIMPLE []
+POSTHOOK: Lineage: tmp.col10 SIMPLE []
+POSTHOOK: Lineage: tmp.col11 SIMPLE []
+POSTHOOK: Lineage: tmp.col2 SIMPLE []
+POSTHOOK: Lineage: tmp.col3 SIMPLE []
+POSTHOOK: Lineage: tmp.col4 SIMPLE []
+POSTHOOK: Lineage: tmp.col5 SIMPLE []
+POSTHOOK: Lineage: tmp.col6 SIMPLE []
+POSTHOOK: Lineage: tmp.col7 SIMPLE []
+POSTHOOK: Lineage: tmp.col8 SIMPLE []
+POSTHOOK: Lineage: tmp.col9 SIMPLE []
 PREHOOK: query: select * from tmp
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tmp

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join8.q.out b/ql/src/test/results/clientpositive/spark/join8.q.out
index 270053c..412a9eb 100644
--- a/ql/src/test/results/clientpositive/spark/join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/join8.q.out
@@ -161,7 +161,7 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
 POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION []
 POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
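
The join38 and join8 lineage changes above show the same effect from the
metadata side: output columns that are constants in the CBO-optimized plan
now carry an empty source list (SIMPLE [] or EXPRESSION []) instead of a
spurious dependency on src.key. A hedged illustration of the kind of column
this applies to:

  -- Hypothetical CTAS; the real tmp/dest1 definitions live in the .q files.
  CREATE TABLE tmp AS SELECT key AS col0, 11 AS col1 FROM src;
  -- col0 lineage: SIMPLE [(src)src.FieldSchema(name:key, ...)]
  -- col1 lineage: SIMPLE []   (a constant with no upstream column)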

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out b/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
index 2c8034f..ee636a0 100644
--- a/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_alt_syntax.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select p1.p_name, p2.p_name
 from part p1 , part p2
 PREHOOK: type: QUERY
@@ -258,7 +258,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select p1.p_name, p2.p_name, p3.p_name
 from part p1 , part p2 , part p3 
 where p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name
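
In join_alt_syntax and the join_cond_pushdown* files that follow, the only
diff is the operator id inside the cross-product warning (JOIN[8] becomes
JOIN[7], JOIN[14] becomes JOIN[13], and so on). A plausible reading, hedged:
skipping the extra ConstantPropagate pass leaves one fewer operator in the
compiled plan, so every operator allocated after it receives an id one
lower; the plans themselves are otherwise unchanged.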

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join_cond_pushdown_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_1.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_1.q.out
index 0610d13..a9bbe15 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_1.q.out
@@ -192,7 +192,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 on p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name
 PREHOOK: type: QUERY
@@ -301,7 +301,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Work 'Reducer 3' is a cross product
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 on p2.p_partkey = 1 and p3.p_name = p2.p_name
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join_cond_pushdown_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_3.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_3.q.out
index 0e748fb..ea5895b 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_3.q.out
@@ -196,7 +196,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 
 where p2.p_partkey + p1.p_partkey = p1.p_partkey and p3.p_name = p2.p_name
@@ -307,7 +307,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Work 'Reducer 3' is a cross product
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part p2 join part p3 
 where p2.p_partkey = 1 and p3.p_name = p2.p_name

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual1.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual1.q.out
index 45fba92..303c77f 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual1.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual1.q.out
@@ -248,7 +248,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part2 p2 join part3 p3 on p2_partkey + p_partkey = p1.p_partkey and p3_name = p2_name
 PREHOOK: type: QUERY
@@ -357,7 +357,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part2 p2 join part3 p3 on p2_partkey = 1 and p3_name = p2_name
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual3.q.out b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual3.q.out
index 9211cb6..641929c 100644
--- a/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual3.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_cond_pushdown_unqual3.q.out
@@ -252,7 +252,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part2 p2 join part3 p3 
 where p2_partkey + p1.p_partkey = p1.p_partkey and p3_name = p2_name
@@ -363,7 +363,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select *
 from part p1 join part2 p2 join part3 p3 
 where p2_partkey = 1 and p3_name = p2_name

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join_reorder.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_reorder.q.out b/ql/src/test/results/clientpositive/spark/join_reorder.q.out
index df6a36e..cc9f9a5 100644
--- a/ql/src/test/results/clientpositive/spark/join_reorder.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_reorder.q.out
@@ -98,9 +98,9 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
+                        key expressions: (UDFToDouble(_col0) + 1.0) (type: double)
                         sort order: +
-                        Map-reduce partition columns: (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
+                        Map-reduce partition columns: (UDFToDouble(_col0) + 1.0) (type: double)
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Reducer 2 
@@ -110,7 +110,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
+                  1 (UDFToDouble(_col0) + 1.0) (type: double)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
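
The join_reorder change runs in the other direction: the new golden file
shows the cast of the literal already reduced to 1.0 in the shuffle key. A
plausible reading, hedged, is that the constant-reduced expression produced
on the CBO path now survives instead of the re-derived UDFToDouble(1) form.
Schematically, assuming the key comes from a join condition of this shape:

  -- Hypothetical condition consistent with the plan above.
  ... JOIN ... ON a.key = b.key + 1
  -- key before: (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
  -- key after:  (UDFToDouble(_col0) + 1.0) (type: double)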

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/join_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_view.q.out b/ql/src/test/results/clientpositive/spark/join_view.q.out
index 14a28be..9d8f6d7 100644
--- a/ql/src/test/results/clientpositive/spark/join_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_view.q.out
@@ -38,6 +38,7 @@ POSTHOOK: Input: default@invites
 POSTHOOK: Input: default@invites2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@v
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from v where ds='2011-09-01'
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from v where ds='2011-09-01'
@@ -50,7 +51,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 3 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -66,9 +67,7 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
-                        key expressions: '2011-09-01' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: '2011-09-01' (type: string)
+                        sort order: 
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                         value expressions: _col0 (type: string)
         Map 3 
@@ -84,9 +83,7 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
-                        key expressions: '2011-09-01' (type: string)
-                        sort order: +
-                        Map-reduce partition columns: '2011-09-01' (type: string)
+                        sort order: 
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                         value expressions: _col0 (type: int)
         Reducer 2 
@@ -95,8 +92,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col1 (type: string)
-                  1 _col1 (type: string)
+                  0 
+                  1 
                 outputColumnNames: _col0, _col2
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Select Operator

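Here the view restricts both join inputs to the same partition constant, so once '2011-09-01' is propagated into the join keys there is nothing left to shuffle on: the key expressions are dropped, each side runs with a single reducer, and the new cross-product warning appears. A hedged sketch of the view shape (column names are placeholders; the real DDL is in join_view.q):

-- both sides join on the partition column, which the ds filter pins
-- to one constant, degenerating the join to a cross product
create view v as
select i.foo, i2.bar, i.ds
from invites i join invites2 i2 on i.ds = i2.ds;

select * from v where ds = '2011-09-01';
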
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out
index 21b7519..cd16787 100644
--- a/ql/src/test/results/clientpositive/spark/pcr.q.out
+++ b/ql/src/test/results/clientpositive/spark/pcr.q.out
@@ -1547,7 +1547,7 @@ STAGE PLANS:
             Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
-                expressions: 14 (type: int), KEY.reducesinkkey1 (type: string)
+                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -2445,19 +2445,23 @@ STAGE PLANS:
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col1, _col3, _col4
                 Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  null sort order: a
-                  sort order: +
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), '2000-04-08' (type: string), _col3 (type: int), _col4 (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col1 (type: string), _col3 (type: int), _col4 (type: string)
-                  auto parallelism: false
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: a
+                    sort order: +
+                    Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: int), _col4 (type: string)
+                    auto parallelism: false
         Reducer 3 
             Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), '2000-04-08' (type: string)
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), VALUE._col1 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -2710,19 +2714,23 @@ STAGE PLANS:
                   1 _col0 (type: int)
                 outputColumnNames: _col0, _col1, _col3, _col4
                 Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  null sort order: a
-                  sort order: +
+                Select Operator
+                  expressions: _col0 (type: int), _col1 (type: string), '2000-04-08' (type: string), _col3 (type: int), _col4 (type: string), '2000-04-09' (type: string)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                   Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col1 (type: string), _col3 (type: int), _col4 (type: string)
-                  auto parallelism: false
+                  Reduce Output Operator
+                    key expressions: _col0 (type: int)
+                    null sort order: a
+                    sort order: +
+                    Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+                    tag: -1
+                    value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: int), _col4 (type: string), _col5 (type: string)
+                    auto parallelism: false
         Reducer 3 
             Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), '2000-04-09' (type: string)
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), VALUE._col4 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -4159,7 +4167,7 @@ STAGE PLANS:
             Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), '2008-04-08' (type: string), KEY.reducesinkkey2 (type: string)
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -4344,7 +4352,7 @@ STAGE PLANS:
             Needs Tagging: false
             Reduce Operator Tree:
               Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), '11' (type: string)
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string)
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator

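In both pcr hunks the pattern is the same: the partition constants ('2000-04-08', '2000-04-09') are no longer re-materialized in the reducer's Select; instead an extra map-side Select projects them as ordinary output columns that travel through the shuffle as value expressions. A hedged sketch of the query shape (pcr_t1 is a placeholder for a table with an int key, string value, and string ds partition; the actual test query may differ):

-- the folded ds constants now ride along as projected columns
-- ahead of the final order-by instead of being rebuilt after it
select t1.key, t1.value, t1.ds, t2.key, t2.value, t2.ds
from pcr_t1 t1 join pcr_t1 t2 on t1.key = t2.key
where t1.ds = '2000-04-08' and t2.ds = '2000-04-09'
order by t1.key;
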
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join5.q.out b/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
index 1c2b592..f08eb4d 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join5.q.out
@@ -32,7 +32,7 @@ POSTHOOK: Lineage: t1.id1 SIMPLE []
 POSTHOOK: Lineage: t1.id2 SIMPLE []
 POSTHOOK: Lineage: t2.d SIMPLE []
 POSTHOOK: Lineage: t2.id SIMPLE []
-Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: explain
 select a.*,b.d d1,c.d d2 from
   t1 a join t2 b on (a.id1 = b.id)
@@ -148,7 +148,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: explain
 select * from (
 select a.*,b.d d1,c.d d2 from
@@ -271,7 +271,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: select * from (
 select a.*,b.d d1,c.d d2 from
   t1 a join t2 b on (a.id1 = b.id)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
index 312b3bd..ca6d1f8 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
@@ -43,35 +43,35 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
+                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
+                    predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string)
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -95,7 +95,7 @@ STAGE PLANS:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
-                     Inner Join 1 to 2
+                     Inner Join 0 to 2
                 keys:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
@@ -103,7 +103,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col0 (type: string)
+                  expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string), _col2 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -417,35 +417,35 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
+                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: string)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
+                    predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: string)
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -469,7 +469,7 @@ STAGE PLANS:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
-                     Inner Join 1 to 2
+                     Inner Join 0 to 2
                 keys:
                   0 _col0 (type: string)
                   1 _col0 (type: string)
@@ -477,7 +477,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col0 (type: string)
+                  expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string), _col2 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

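Two things change in this file: the self-join branches swap which side carries the value column, flipping the predicate order in the two Filter Operators, and the three-way join is now expressed as Inner Join 0 to 2 (both conditions anchored on input 0) with the output columns remapped to match. A hedged sketch of the underlying self-join, mirroring the predicates visible in the plan (the .q file's outer joins are evidently rewritten to the inner joins shown):

select b.key, b.value, c.key, c.value, a.key
from src a
join src b on a.key = b.key
join src c on a.key = c.key
where a.key > '10' and a.key < '20'
  and a.key > '15' and a.key < '25'
  and sqrt(a.key) <> 13.0;
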
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/ppd_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join5.q.out
index ef8c674..e4c3d2b 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join5.q.out
@@ -30,7 +30,7 @@ POSTHOOK: query: create table t4 (id int, key string, value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t4
-Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from t1 full outer join t2 on t1.id=t2.id join t3 on t2.id=t3.id where t3.id=20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from t1 full outer join t2 on t1.id=t2.id join t3 on t2.id=t3.id where t3.id=20
@@ -128,7 +128,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t2.id=t3.id) where t2.id=20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t2.id=t3.id) where t2.id=20
@@ -222,7 +222,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t1.id=t3.id) where t2.id=20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t1.id=t3.id) where t2.id=20

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/skewjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoin.q.out b/ql/src/test/results/clientpositive/spark/skewjoin.q.out
index 1475995..6f9e674 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoin.q.out
@@ -847,9 +847,9 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+                        key expressions: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
                         sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+                        Map-reduce partition columns: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
@@ -860,7 +860,7 @@ STAGE PLANS:
                 handleSkewJoin: true
                 keys:
                   0 _col0 (type: string), UDFToDouble(substring(_col1, 5)) (type: double)
-                  1 _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+                  1 _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
                 outputColumnNames: _col2, _col3
                 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                 Select Operator

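The skewjoin hunk is another instance of the same folding: the second join key is an arithmetic expression over substring(), and the trailing UDFToDouble(1) collapses to 1.0 both in the Reduce Output Operator and in the skew-join keys. A hedged sketch (t1/t2 are placeholder tables with string key and value columns):

-- handleSkewJoin: true in the plan above corresponds to running
-- set hive.optimize.skewjoin=true; before a query of roughly this shape
select a.key, b.value
from t1 a join t2 b
  on a.key = b.key
 and substring(a.value, 5) = substring(b.value, 5) + 1;
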
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out
index 4b392ba..75422ec 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out
@@ -46,8 +46,9 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_bucket_3
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
-Warning: Shuffle Join JOIN[20][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 6' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[18][tables = [$hdt$_1, $hdt$_2]] in Work 'Reducer 6' is a cross product
+Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 3' is a cross product
 PREHOOK: query: explain 
 select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5
 PREHOOK: type: QUERY
@@ -64,7 +65,7 @@ STAGE PLANS:
       Edges:
         Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 4 (PARTITION-LEVEL SORT, 1)
         Reducer 6 <- Map 5 (PARTITION-LEVEL SORT, 1), Map 7 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2), Reducer 6 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 1), Reducer 6 (PARTITION-LEVEL SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -129,9 +130,7 @@ STAGE PLANS:
                   1 
                 Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 5 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: 5 (type: int)
+                  sort order: 
                   Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
@@ -139,8 +138,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 5 (type: int)
-                  1 5 (type: int)
+                  0 
+                  1 
                 Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 5 (type: int), 5 (type: int)
@@ -163,9 +162,7 @@ STAGE PLANS:
                   1 
                 Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 5 (type: int)
-                  sort order: +
-                  Map-reduce partition columns: 5 (type: int)
+                  sort order: 
                   Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
@@ -174,8 +171,9 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[32][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[29][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[31][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[30][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: -- explain
 -- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value
 
@@ -245,8 +243,8 @@ STAGE PLANS:
                         Statistics: Num rows: 28 Data size: 114 Basic stats: COMPLETE Column stats: NONE
                         Spark HashTable Sink Operator
                           keys:
-                            0 5 (type: int)
-                            1 5 (type: int)
+                            0 
+                            1 
             Local Work:
               Map Reduce Local Work
         Map 3 
@@ -293,8 +291,8 @@ STAGE PLANS:
                           condition map:
                                Inner Join 0 to 1
                           keys:
-                            0 5 (type: int)
-                            1 5 (type: int)
+                            0 
+                            1 
                           input vertices:
                             0 Map 1
                           Statistics: Num rows: 31 Data size: 129 Basic stats: COMPLETE Column stats: NONE
@@ -318,8 +316,9 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
-Warning: Map Join MAPJOIN[32][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[29][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[31][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[30][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@smb_bucket_1

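The smb_mapjoin_25 query (quoted in full above) filters every branch to key = 5, so after constant propagation the outer t1.key = t2.key join compares two constants. The keys are now dropped outright, Reducer 3 degrades to a single-reducer cross product with its own warning, and the map-join rewrite in Stage-1 picks up a matching extra warning. The degenerate core, as a reduced sketch:

-- an equi-join whose key is pinned to the same constant on both sides
-- carries no distribution information and becomes a cross product
select *
from (select key from smb_bucket_1 where key = 5) t1
join (select key from smb_bucket_2 where key = 5) t2
  on t1.key = t2.key;
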
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out b/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out
index bbf24fd..71d6e09 100644
--- a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out
+++ b/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out
@@ -77,7 +77,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_3
+Operator:GBY_2
 Table:default@t1
 Keys:key
 
@@ -90,7 +90,7 @@ PREHOOK: query: SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_3
+Operator:GBY_2
 Table:default@t1
 Keys:key,val
 
@@ -104,7 +104,7 @@ PREHOOK: query: SELECT key, 1, val, 2, count(1) FROM T1 GROUP BY key, 1, val, 2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_3
+Operator:GBY_2
 Table:default@t1
 Keys:key,val
 
@@ -413,7 +413,7 @@ PREHOOK: Input: default@t2
 #### A masked pattern was here ####
 Operator:JOIN_8
 Table:default@t1
-Keys:val,key
+Keys:key
 Table:default@t2
 Keys:key
 
@@ -439,7 +439,7 @@ Operator:JOIN_8
 Table:default@t1
 Keys:key
 Table:default@t2
-Keys:val,key
+Keys:key
 
 PREHOOK: query: -- no mapping on functions
 SELECT *
@@ -474,7 +474,7 @@ PREHOOK: Input: default@t2
 #### A masked pattern was here ####
 Operator:JOIN_8
 Table:default@t1
-Keys:val,key
+Keys:key
 Table:default@t2
 Keys:key
 
@@ -505,7 +505,7 @@ PREHOOK: Input: default@t3
 #### A masked pattern was here ####
 Operator:JOIN_8
 Table:default@t1
-Keys:val,key
+Keys:key
 Table:default@t2
 Keys:key
 
@@ -518,6 +518,7 @@ Keys:val
 13.0	1
 17.0	1
 46.0	1
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 4' is a cross product
 PREHOOK: query: -- join followed by join
 SELECT *
 FROM
@@ -543,7 +544,7 @@ PREHOOK: Input: default@t3
 #### A masked pattern was here ####
 Operator:JOIN_8
 Table:default@t1
-Keys:val,key
+Keys:key
 Table:default@t2
 Keys:key
 

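The table-access changes fall out of the same rework: group-by operator ids shift down by one (GBY_3 to GBY_2) because the rewritten plan contains one operator fewer upstream, and a column that is only ever compared to a constant no longer surfaces as a join access key, leaving Keys:key instead of Keys:val,key. A hedged sketch of the join pattern (T1/T2 with key and val columns, as in the test; the literal is illustrative):

-- val is pinned by the where clause, so it is no longer reported
-- among the join's table access keys; only key remains
select *
from t1 join t2 on t1.key = t2.key
where t1.val = '3' and t2.val = '3';
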
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/spark/union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_view.q.out b/ql/src/test/results/clientpositive/spark/union_view.q.out
index 3372afb..7ac641f 100644
--- a/ql/src/test/results/clientpositive/spark/union_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_view.q.out
@@ -541,14 +541,14 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: _col0 (type: string), _col1 (type: string)
-                        outputColumnNames: _col1, _col2
+                        expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col2 (type: string)
                           sort order: +
                           Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: string)
+                          value expressions: _col0 (type: int), _col1 (type: string)
         Map 3 
             Map Operator Tree:
                 TableScan
@@ -563,14 +563,14 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: _col0 (type: string), _col1 (type: string)
-                        outputColumnNames: _col1, _col2
+                        expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col2 (type: string)
                           sort order: +
                           Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: string)
+                          value expressions: _col0 (type: int), _col1 (type: string)
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -585,18 +585,18 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: _col0 (type: string), _col1 (type: string)
-                        outputColumnNames: _col1, _col2
+                        expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+                        outputColumnNames: _col0, _col1, _col2
                         Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col2 (type: string)
                           sort order: +
                           Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: string)
+                          value expressions: _col0 (type: int), _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
-                expressions: 86 (type: int), VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string)
+                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator

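union_view shows the union-plan counterpart of the pcr change: the literal 86 is now projected on the map side in each union branch and shipped through the shuffle as VALUE._col0, rather than being re-created in Reducer 2 after the sort. A hedged sketch (src_union_* are placeholder names for the test's partitioned union inputs):

-- each branch projects the constant key alongside value and ds,
-- and the reducer simply forwards VALUE._col0
select 86 as key, value, ds from src_union_1 where key = 86
union all
select 86, value, ds from src_union_2 where key = 86
union all
select 86, value, ds from src_union_3 where key = 86
order by ds;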

[55/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
new file mode 100644
index 0000000..0e11f3f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out
@@ -0,0 +1,424 @@
+PREHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
+create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
+    partitioned by (ds String, hr String) 
+    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_mul_col
+POSTHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
+create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
+    partitioned by (ds String, hr String) 
+    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_mul_col
+PREHOOK: query: -- list bucketing DML 
+explain extended
+insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '11')
+select 1, key, 1, value, 1 from src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML 
+explain extended
+insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '11')
+select 1, key, 1, value, 1 from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: '1' (type: string), key (type: string), '1' (type: string), value (type: string), '1' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=11/
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns col1,col2,col3,col4,col5
+                      columns.comments 
+                      columns.types string:string:string:string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_mul_col
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_mul_col
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+      Truncated Path -> Alias:
+        /src [src]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns col1,col2,col3,col4,col5
+                columns.comments 
+                columns.types string:string:string:string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_mul_col
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_mul_col
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
+select 1, key, 1, value, 1 from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11')
+select 1, key, 1, value, 1 from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col1 EXPRESSION []
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col2 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col3 EXPRESSION []
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col5 EXPRESSION []
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_mul_col
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_mul_col
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_mul_col
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_mul_col
+# col_name            	data_type           	comment             
+	 	 
+col1                	string              	                    
+col2                	string              	                    
+col3                	string              	                    
+col4                	string              	                    
+col5                	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_mul_col	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	6312                
+	totalSize           	7094                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[col2, col4]        	 
+Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=466/col4=val_466, [287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=287/col4=val_287, [82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=11/col2=82/col4=val_82}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain extended
+select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns col1,col2,col3,col4,col5
+              columns.comments 
+              columns.types string:string:string:string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_mul_col
+              numFiles 4
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 6312
+              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 7094
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns col1,col2,col3,col4,col5
+                columns.comments 
+                columns.types string:string:string:string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_mul_col
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_mul_col
+            name: default.list_bucketing_mul_col
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_mul_col
+          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((col2 = '466') and (col4 = 'val_466')) (type: boolean)
+            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: col1 (type: string), '466' (type: string), col3 (type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' (type: string), '11' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_mul_col
+PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='11' and col2 = "466" and col4 = "val_466"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+1	466	1	val_466	1	2008-04-08	11
+1	466	1	val_466	1	2008-04-08	11
+1	466	1	val_466	1	2008-04-08	11
+PREHOOK: query: explain extended
+select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns col1,col2,col3,col4,col5
+              columns.comments 
+              columns.types string:string:string:string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_mul_col
+              numFiles 4
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 6312
+              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 7094
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns col1,col2,col3,col4,col5
+                columns.comments 
+                columns.types string:string:string:string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_mul_col
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_mul_col
+            name: default.list_bucketing_mul_col
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_mul_col
+          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((col2 = '382') and (col4 = 'val_382')) (type: boolean)
+            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: col1 (type: string), '382' (type: string), col3 (type: string), 'val_382' (type: string), col5 (type: string), '2008-04-08' (type: string), '11' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_mul_col
+PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='11' and col2 = "382" and col4 = "val_382"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+1	382	1	val_382	1	2008-04-08	11
+1	382	1	val_382	1	2008-04-08	11
+PREHOOK: query: drop table list_bucketing_mul_col
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_mul_col
+PREHOOK: Output: default@list_bucketing_mul_col
+POSTHOOK: query: drop table list_bucketing_mul_col
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: Output: default@list_bucketing_mul_col

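list_bucket_dml_12.q.out documents the full list-bucketing round trip: the skewed (col2, col4) value pairs are written to dedicated subdirectories (see the Skewed Value to Truncated Path map above), and a point lookup on those columns can then be served from just the matching directory. The settings such a run typically needs, as a hedged sketch (these are the standard Hive/Hadoop property names, but defaults vary by version):

set hive.mapred.supports.subdirectories=true;
set hive.optimize.listbucketing=true;
set mapred.input.dir.recursive=true;
-- reads only .../ds=2008-04-08/hr=11/col2=466/col4=val_466
select * from list_bucketing_mul_col
where ds = '2008-04-08' and hr = '11' and col2 = '466' and col4 = 'val_466';
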
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
deleted file mode 100644
index bfce335..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
+++ /dev/null
@@ -1,337 +0,0 @@
-PREHOOK: query: -- Ensure skewed value map has escaped directory name
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
-    partitioned by (ds String, hr String) 
-    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: -- Ensure skewed value map has escaped directory name
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
-    partitioned by (ds String, hr String) 
-    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_mul_col
-PREHOOK: query: -- list bucketing DML 
-explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '2013-01-23+18:00:99')
-select 1, key, 1, value, 1 from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML 
-explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '2013-01-23+18:00:99')
-select 1, key, 1, value, 1 from src
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: '1' (type: string), key (type: string), '1' (type: string), value (type: string), '1' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns col1,col2,col3,col4,col5
-                      columns.comments 
-                      columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_mul_col
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_mul_col
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 2013-01-23+18:00:99
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99')
-select 1, key, 1, value, 1 from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-POSTHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99')
-select 1, key, 1, value, 1 from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col1 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col2 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col3 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col5 EXPRESSION []
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_mul_col
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_mul_col
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_mul_col
-ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='2013-01-23+18:00:99')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='2013-01-23+18:00:99')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-# col_name            	data_type           	comment             
-	 	 
-col1                	string              	                    
-col2                	string              	                    
-col3                	string              	                    
-col4                	string              	                    
-col5                	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 2013-01-23+18:00:99]	 
-Database:           	default             	 
-Table:              	list_bucketing_mul_col	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	6312                
-	totalSize           	7094                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[col2, col4]        	 
-Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=82/col4=val_82, [466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=466/col4=val_466, [287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=287/col4=val_287}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 2013-01-23+18:00:99
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns col1,col2,col3,col4,col5
-              columns.comments 
-              columns.types string:string:string:string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_mul_col
-              numFiles 4
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 6312
-              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 7094
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-            name: default.list_bucketing_mul_col
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_mul_col
-          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((col2 = '466') and (col4 = 'val_466')) (type: boolean)
-            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: col1 (type: string), '466' (type: string), col3 (type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' (type: string), '2013-01-23+18:00:99' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-#### A masked pattern was here ####
-1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
-1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
-1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
-PREHOOK: query: drop table list_bucketing_mul_col
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: drop table list_bucketing_mul_col
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Output: default@list_bucketing_mul_col

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out
deleted file mode 100644
index f7a1039..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.8.out
+++ /dev/null
@@ -1,439 +0,0 @@
-PREHOOK: query: -- Ensure skewed value map has escaped directory name
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
-    partitioned by (ds String, hr String) 
-    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: -- Ensure skewed value map has escaped directory name
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
-create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
-    partitioned by (ds String, hr String) 
-    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_mul_col
-PREHOOK: query: -- list bucketing DML 
-explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '2013-01-23+18:00:99')
-select 1, key, 1, value, 1 from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML 
-explain extended
-insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '2013-01-23+18:00:99')
-select 1, key, 1, value, 1 from src
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            src
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_mul_col
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '2013-01-23+18:00:99'
-      TOK_SELECT
-         TOK_SELEXPR
-            1
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            1
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-         TOK_SELEXPR
-            1
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: UDFToString(1) (type: string), key (type: string), UDFToString(1) (type: string), value (type: string), UDFToString(1) (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns col1,col2,col3,col4,col5
-                      columns.comments 
-                      columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_mul_col
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_mul_col
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [$hdt$_0:src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 2013-01-23+18:00:99
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99')
-select 1, key, 1, value, 1 from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-POSTHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99')
-select 1, key, 1, value, 1 from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col1 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col2 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col3 EXPRESSION []
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col5 EXPRESSION []
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_mul_col
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_mul_col
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_mul_col
-ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='2013-01-23+18:00:99')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='2013-01-23+18:00:99')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-# col_name            	data_type           	comment             
-	 	 
-col1                	string              	                    
-col2                	string              	                    
-col3                	string              	                    
-col4                	string              	                    
-col5                	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 2013-01-23+18:00:99]	 
-Database:           	default             	 
-Table:              	list_bucketing_mul_col	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	6312                
-	totalSize           	7094                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[col2, col4]        	 
-Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=466/col4=val_466, [287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=287/col4=val_287, [82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=82/col4=val_82}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_mul_col
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            and
-               and
-                  =
-                     TOK_TABLE_OR_COL
-                        ds
-                     '2008-04-08'
-                  =
-                     TOK_TABLE_OR_COL
-                        hr
-                     '2013-01-23+18:00:99'
-               =
-                  TOK_TABLE_OR_COL
-                     col2
-                  "466"
-            =
-               TOK_TABLE_OR_COL
-                  col4
-               "val_466"
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_mul_col
-            Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((col2 = '466') and (col4 = 'val_466')) (type: boolean)
-              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: col1 (type: string), '466' (type: string), col3 (type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' (type: string), '2013-01-23+18:00:99' (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-                Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3,_col4,_col5,_col6
-                        columns.types string:string:string:string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.nesting.levels true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: col4=val_466
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 2013-01-23+18:00:99
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns col1,col2,col3,col4,col5
-              columns.comments 
-              columns.types string:string:string:string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_mul_col
-              numFiles 4
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 6312
-              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 7094
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns col1,col2,col3,col4,col5
-                columns.comments 
-                columns.types string:string:string:string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_mul_col
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_mul_col
-            name: default.list_bucketing_mul_col
-      Truncated Path -> Alias:
-        /list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=466/col4=val_466 [list_bucketing_mul_col]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_mul_col 
-where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
-#### A masked pattern was here ####
-1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
-1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
-1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
-PREHOOK: query: drop table list_bucketing_mul_col
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_mul_col
-PREHOOK: Output: default@list_bucketing_mul_col
-POSTHOOK: query: drop table list_bucketing_mul_col
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_mul_col
-POSTHOOK: Output: default@list_bucketing_mul_col

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
new file mode 100644
index 0000000..93ebef0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out
@@ -0,0 +1,335 @@
+PREHOOK: query: -- Ensure skewed value map has escaped directory name
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
+create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
+    partitioned by (ds String, hr String) 
+    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_mul_col
+POSTHOOK: query: -- Ensure skewed value map has escaped directory name
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns
+create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) 
+    partitioned by (ds String, hr String) 
+    skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_mul_col
+PREHOOK: query: -- list bucketing DML 
+explain extended
+insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '2013-01-23+18:00:99')
+select 1, key, 1, value, 1 from src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML 
+explain extended
+insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08',  hr = '2013-01-23+18:00:99')
+select 1, key, 1, value, 1 from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: '1' (type: string), key (type: string), '1' (type: string), value (type: string), '1' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns col1,col2,col3,col4,col5
+                      columns.comments 
+                      columns.types string:string:string:string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_mul_col
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_mul_col
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+      Truncated Path -> Alias:
+        /src [src]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 2013-01-23+18:00:99
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns col1,col2,col3,col4,col5
+                columns.comments 
+                columns.types string:string:string:string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_mul_col
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_mul_col
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99')
+select 1, key, 1, value, 1 from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
+POSTHOOK: query: insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99')
+select 1, key, 1, value, 1 from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col1 EXPRESSION []
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col2 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col3 EXPRESSION []
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col5 EXPRESSION []
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_mul_col
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_mul_col
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_mul_col
+ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
+PREHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='2013-01-23+18:00:99')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: query: desc formatted list_bucketing_mul_col partition (ds='2008-04-08', hr='2013-01-23+18:00:99')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_mul_col
+# col_name            	data_type           	comment             
+	 	 
+col1                	string              	                    
+col2                	string              	                    
+col3                	string              	                    
+col4                	string              	                    
+col5                	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 2013-01-23+18:00:99]	 
+Database:           	default             	 
+Table:              	list_bucketing_mul_col	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	6312                
+	totalSize           	7094                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[col2, col4]        	 
+Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[466, val_466]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=466/col4=val_466, [287, val_287]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=287/col4=val_287, [82, val_82]=/list_bucketing_mul_col/ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/col2=82/col4=val_82}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: explain extended
+select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 2013-01-23+18:00:99
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns col1,col2,col3,col4,col5
+              columns.comments 
+              columns.types string:string:string:string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_mul_col
+              numFiles 4
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 6312
+              serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 7094
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns col1,col2,col3,col4,col5
+                columns.comments 
+                columns.types string:string:string:string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_mul_col
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_mul_col { string col1, string col2, string col3, string col4, string col5}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_mul_col
+            name: default.list_bucketing_mul_col
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_mul_col
+          Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((col2 = '466') and (col4 = 'val_466')) (type: boolean)
+            Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: col1 (type: string), '466' (type: string), col3 (type: string), 'val_466' (type: string), col5 (type: string), '2008-04-08' (type: string), '2013-01-23+18:00:99' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+              Statistics: Num rows: 125 Data size: 1578 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_mul_col
+PREHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_mul_col 
+where ds='2008-04-08' and hr='2013-01-23+18:00:99' and col2 = "466" and col4 = "val_466"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: Input: default@list_bucketing_mul_col@ds=2008-04-08/hr=2013-01-23+18%3A00%3A99
+#### A masked pattern was here ####
+1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
+1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
+1	466	1	val_466	1	2008-04-08	2013-01-23+18:00:99
+PREHOOK: query: drop table list_bucketing_mul_col
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_mul_col
+PREHOOK: Output: default@list_bucketing_mul_col
+POSTHOOK: query: drop table list_bucketing_mul_col
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_mul_col
+POSTHOOK: Output: default@list_bucketing_mul_col


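The unified list_bucket_dml_13.q.out above replaces the java1.7/java1.8 golden files deleted earlier in this message. The behavior it pins down is directory-name escaping: the partition value '2013-01-23+18:00:99' contains ':' characters, which are percent-encoded as %3A in the on-disk path, and the skewed value map must reference the escaped form. A minimal sketch of the statements involved, taken from the test itself:

    insert overwrite table list_bucketing_mul_col
      partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99')
    select 1, key, 1, value, 1 from src;

    -- the partition (and each skewed subdirectory beneath it) is stored under the
    -- escaped path .../ds=2008-04-08/hr=2013-01-23+18%3A00%3A99/...
    show partitions list_bucketing_mul_col;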
[34/66] [abbrv] hive git commit: HIVE-13587: Set Hive pom to use Hadoop 2.6.1 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
HIVE-13587: Set Hive pom to use Hadoop 2.6.1 (Mohit Sabharwal, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/86944354
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/86944354
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/86944354

Branch: refs/heads/java8
Commit: 869443542f3175010e43f3664c7ac8af916788df
Parents: 3a2a3e1
Author: Sergio Pena <se...@cloudera.com>
Authored: Mon May 2 13:08:54 2016 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu May 26 10:36:47 2016 -0500

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/86944354/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 117aec9..3176caf 100644
--- a/pom.xml
+++ b/pom.xml
@@ -132,7 +132,7 @@
     <dropwizard-metrics-hadoop-metrics2-reporter.version>0.1.0</dropwizard-metrics-hadoop-metrics2-reporter.version>
     <guava.version>14.0.1</guava.version>
     <groovy.version>2.4.4</groovy.version>
-    <hadoop.version>2.6.0</hadoop.version>
+    <hadoop.version>2.6.1</hadoop.version>
     <hadoop.bin.path>${basedir}/${hive.path.to.root}/testutils/hadoop</hadoop.bin.path>
     <hbase.version>1.1.1</hbase.version>
     <!-- required for logging test to avoid including hbase which pulls disruptor transitively -->


[24/66] [abbrv] hive git commit: HIVE-13832: Add missing license header to files (Jesus Camacho Rodriguez, reviewed by Vikram Dixit K)

Posted by sp...@apache.org.
HIVE-13832: Add missing license header to files (Jesus Camacho Rodriguez, reviewed by Vikram Dixit K)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cd274547
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cd274547
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cd274547

Branch: refs/heads/java8
Commit: cd274547f33f9d5e5a86d9260da6defa1aea9a90
Parents: 4c5ca8b
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue May 24 19:15:11 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed May 25 09:30:24 2016 +0100

----------------------------------------------------------------------
 llap-server/scripts/serviceCheckScript.sql         | 12 ++++++++++++
 llap-server/sql/serviceCheckScript.sql             | 12 ------------
 .../hbase/stats/IExtrapolatePartStatus.java        | 17 +++++++++++++++++
 .../hadoop/hive/metastore/model/MConstraint.java   | 17 +++++++++++++++++
 .../org/apache/orc/impl/DataReaderProperties.java  | 17 +++++++++++++++++
 .../apache/orc/impl/TestDataReaderProperties.java  | 17 +++++++++++++++++
 packaging/src/main/assembly/bin.xml                |  2 +-
 .../ql/exec/spark/status/impl/SparkJobUtils.java   | 17 +++++++++++++++++
 .../hadoop/hive/ql/txn/AcidWriteSetService.java    | 17 +++++++++++++++++
 .../hive/ql/plan/TestTezWorkConcurrency.java       | 17 +++++++++++++++++
 .../hadoop/hive/serde2/thrift/ThriftFormatter.java | 17 +++++++++++++++++
 .../hadoop/hive/ql/util/JavaDataModelTest.java     | 17 +++++++++++++++++
 12 files changed, 166 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/llap-server/scripts/serviceCheckScript.sql
----------------------------------------------------------------------
diff --git a/llap-server/scripts/serviceCheckScript.sql b/llap-server/scripts/serviceCheckScript.sql
new file mode 100644
index 0000000..0c3d64e
--- /dev/null
+++ b/llap-server/scripts/serviceCheckScript.sql
@@ -0,0 +1,12 @@
+set hive.execution.mode=llap;
+set hive.llap.execution.mode=all;
+
+
+CREATE temporary TABLE ${hiveconf:hiveLlapServiceCheck} (name VARCHAR(64), age INT)
+  CLUSTERED BY (age) INTO 2 BUCKETS STORED AS ORC;
+ 
+INSERT INTO TABLE ${hiveconf:hiveLlapServiceCheck}
+  VALUES ('fred flintstone', 35), ('barney rubble', 32);
+ 
+select count(1) from ${hiveconf:hiveLlapServiceCheck};
+

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/llap-server/sql/serviceCheckScript.sql
----------------------------------------------------------------------
diff --git a/llap-server/sql/serviceCheckScript.sql b/llap-server/sql/serviceCheckScript.sql
deleted file mode 100644
index 0c3d64e..0000000
--- a/llap-server/sql/serviceCheckScript.sql
+++ /dev/null
@@ -1,12 +0,0 @@
-set hive.execution.mode=llap;
-set hive.llap.execution.mode=all;
-
-
-CREATE temporary TABLE ${hiveconf:hiveLlapServiceCheck} (name VARCHAR(64), age INT)
-  CLUSTERED BY (age) INTO 2 BUCKETS STORED AS ORC;
- 
-INSERT INTO TABLE ${hiveconf:hiveLlapServiceCheck}
-  VALUES ('fred flintstone', 35), ('barney rubble', 32);
- 
-select count(1) from ${hiveconf:hiveLlapServiceCheck};
-

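The script above (relocated from llap-server/sql/ to llap-server/scripts/ by this commit) takes its temporary table name from the hiveLlapServiceCheck hiveconf variable. A hypothetical invocation from the Hive CLI, with an illustrative value for the variable and the script path as it appears in the diff:

    set hiveLlapServiceCheck=llap_service_check_tmp;
    source llap-server/scripts/serviceCheckScript.sql;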
http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/IExtrapolatePartStatus.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/IExtrapolatePartStatus.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/IExtrapolatePartStatus.java
index 99af060..af75bce 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/IExtrapolatePartStatus.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/stats/IExtrapolatePartStatus.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hive.metastore.hbase.stats;
 
 import java.util.Map;

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/metastore/src/model/org/apache/hadoop/hive/metastore/model/MConstraint.java
----------------------------------------------------------------------
diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MConstraint.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MConstraint.java
index 5876060..6da40ac 100644
--- a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MConstraint.java
+++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MConstraint.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hive.metastore.model;
 
 import java.io.Serializable;

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/orc/src/java/org/apache/orc/impl/DataReaderProperties.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/DataReaderProperties.java b/orc/src/java/org/apache/orc/impl/DataReaderProperties.java
index bb73d53..22301e8 100644
--- a/orc/src/java/org/apache/orc/impl/DataReaderProperties.java
+++ b/orc/src/java/org/apache/orc/impl/DataReaderProperties.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.orc.impl;
 
 import com.google.common.base.Preconditions;

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/orc/src/test/org/apache/orc/impl/TestDataReaderProperties.java
----------------------------------------------------------------------
diff --git a/orc/src/test/org/apache/orc/impl/TestDataReaderProperties.java b/orc/src/test/org/apache/orc/impl/TestDataReaderProperties.java
index b9918f2..46546b0 100644
--- a/orc/src/test/org/apache/orc/impl/TestDataReaderProperties.java
+++ b/orc/src/test/org/apache/orc/impl/TestDataReaderProperties.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.orc.impl;
 
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/packaging/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml
index 8fd934a..03d40f0 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -169,7 +169,7 @@
 
     <fileSet>
       <fileMode>666</fileMode>
-      <directory>${project.parent.basedir}/llap-server/sql</directory>
+      <directory>${project.parent.basedir}/llap-server/scripts</directory>
       <includes>
         <include>**/*</include>
       </includes>

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/SparkJobUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/SparkJobUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/SparkJobUtils.java
index 383d76f..eff208a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/SparkJobUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/status/impl/SparkJobUtils.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hive.ql.exec.spark.status.impl;
 
 import java.util.LinkedHashMap;

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidWriteSetService.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidWriteSetService.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidWriteSetService.java
index 9085a6a..2b8f815 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidWriteSetService.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/AcidWriteSetService.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hive.ql.txn;
 
 import org.apache.hadoop.hive.conf.HiveConf;

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWorkConcurrency.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWorkConcurrency.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWorkConcurrency.java
index c59fd10..9af1c1b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWorkConcurrency.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWorkConcurrency.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hive.ql.plan;
 
 import com.google.common.collect.Lists;

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/serde/src/java/org/apache/hadoop/hive/serde2/thrift/ThriftFormatter.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/thrift/ThriftFormatter.java b/serde/src/java/org/apache/hadoop/hive/serde2/thrift/ThriftFormatter.java
index a4c120e..f93f999 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/thrift/ThriftFormatter.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/thrift/ThriftFormatter.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hive.serde2.thrift;
 
 import java.io.IOException;

http://git-wip-us.apache.org/repos/asf/hive/blob/cd274547/storage-api/src/test/org/apache/hadoop/hive/ql/util/JavaDataModelTest.java
----------------------------------------------------------------------
diff --git a/storage-api/src/test/org/apache/hadoop/hive/ql/util/JavaDataModelTest.java b/storage-api/src/test/org/apache/hadoop/hive/ql/util/JavaDataModelTest.java
index 35976cc..7cd2e12 100644
--- a/storage-api/src/test/org/apache/hadoop/hive/ql/util/JavaDataModelTest.java
+++ b/storage-api/src/test/org/apache/hadoop/hive/ql/util/JavaDataModelTest.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hive.ql.util;
 
 import org.junit.After;


[59/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
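
For context, these per-JDK golden files existed largely because JDK 7 and JDK 8 iterate hash-based maps in different orders, which reorders the keys inside masked plan properties such as COLUMN_STATS_ACCURATE: the removed java1.7 file below prints {"COLUMN_STATS":{...},"BASIC_STATS":"true"} where its replacement prints {"BASIC_STATS":"true","COLUMN_STATS":{...}}. A minimal sketch of the kind of statement whose EXPLAIN EXTENDED output carries that property (the table name demo_t is hypothetical):

  -- Column statistics populate COLUMN_STATS_ACCURATE in the table properties;
  -- its JSON key order followed the JVM's map iteration order, hence the
  -- per-JDK out files this commit removes:
  ANALYZE TABLE demo_t COMPUTE STATISTICS FOR COLUMNS;
  EXPLAIN EXTENDED SELECT key, value FROM demo_t;
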
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
deleted file mode 100644
index 5c40dc4..0000000
--- a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.java1.7.out
+++ /dev/null
@@ -1,693 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: key, value
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: key (type: string)
-                null sort order: a
-                sort order: +
-                Map-reduce partition columns: key (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                tag: 0
-                value expressions: value (type: string)
-                auto parallelism: false
-          TableScan
-            alias: b
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), ds (type: string)
-              outputColumnNames: key, value, ds
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: key (type: string)
-                null sort order: a
-                sort order: +
-                Map-reduce partition columns: key (type: string)
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                tag: 1
-                value expressions: value (type: string), ds (type: string)
-                auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-09
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-09
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /src [a]
-        /srcpart/ds=2008-04-08/hr=11 [b]
-        /srcpart/ds=2008-04-08/hr=12 [b]
-        /srcpart/ds=2008-04-09/hr=11 [b]
-        /srcpart/ds=2008-04-09/hr=12 [b]
-      Needs Tagging: true
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Outer Join 0 to 1
-          filter mappings:
-            1 [0, 1]
-          filter predicates:
-            0 
-            1 {(VALUE.ds = '2008-04-08')}
-          keys:
-            0 key (type: string)
-            1 key (type: string)
-          outputColumnNames: key, value, key0, value0
-          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
-            outputColumnNames: key, value, key0, value0
-            Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key0) > 15.0) and (UDFToDouble(key0) < 25.0)) (type: boolean)
-              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns key,value,key0,value0
-                      columns.types string:string:string:string
-                      escape.delim \
-                      hive.serialization.extend.additional.nesting.levels true
-                      serialization.escape.crlf true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19
-PREHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: key, value
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: key (type: string)
-                  null sort order: a
-                  sort order: +
-                  Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                  tag: 0
-                  value expressions: value (type: string)
-                  auto parallelism: false
-          TableScan
-            alias: b
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: key, value
-                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: key (type: string)
-                  null sort order: a
-                  sort order: +
-                  Map-reduce partition columns: key (type: string)
-                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                  tag: 1
-                  value expressions: value (type: string)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /src [a]
-        /srcpart/ds=2008-04-08/hr=11 [b]
-        /srcpart/ds=2008-04-08/hr=12 [b]
-      Needs Tagging: true
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Right Outer Join0 to 1
-          keys:
-            0 key (type: string)
-            1 key (type: string)
-          outputColumnNames: key, value, key0, value0
-          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
-            outputColumnNames: key, value, key0, value0
-            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
-              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns key,value,key0,value0
-                      columns.types string:string:string:string
-                      escape.delim \
-                      hive.serialization.extend.additional.nesting.levels true
-                      serialization.escape.crlf true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19

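The replacement file below carries the same two plans as the file removed above; apart from the masked JSON key order, the only change is the dropped JAVA_VERSION_SPECIFIC_OUTPUT marker. The point the test itself demonstrates is worth keeping in view: a partition predicate inside the ON clause of a FULL OUTER JOIN cannot prune partitions (all four srcpart partitions are scanned and the predicate survives as a residual join filter), while the same predicate in the WHERE clause prunes the scan to the two ds='2008-04-08' partitions and lets the optimizer simplify the join (the second plan shows a right outer join). A minimal sketch of the two variants against the test's src/srcpart tables, omitting the key-range predicates for brevity:

  -- Variant 1: partition predicate in the ON clause; no pruning for a FULL
  -- OUTER JOIN, so the predicate becomes a residual filter on the join itself:
  EXPLAIN EXTENDED
  SELECT a.key, a.value, b.key, b.value
  FROM src a FULL OUTER JOIN srcpart b
    ON (a.key = b.key AND b.ds = '2008-04-08');

  -- Variant 2: partition predicate in the WHERE clause; it prunes srcpart to
  -- ds='2008-04-08' and, because it rejects NULL-extended b rows, the full
  -- outer join can be simplified:
  EXPLAIN EXTENDED
  SELECT a.key, a.value, b.key, b.value
  FROM src a FULL OUTER JOIN srcpart b ON (a.key = b.key)
  WHERE b.ds = '2008-04-08';
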
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
new file mode 100644
index 0000000..200b8ee
--- /dev/null
+++ b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out
@@ -0,0 +1,691 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: key (type: string)
+                null sort order: a
+                sort order: +
+                Map-reduce partition columns: key (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                tag: 0
+                value expressions: value (type: string)
+                auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), ds (type: string)
+              outputColumnNames: key, value, ds
+              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: key (type: string)
+                null sort order: a
+                sort order: +
+                Map-reduce partition columns: key (type: string)
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                tag: 1
+                value expressions: value (type: string), ds (type: string)
+                auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-09
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [a]
+        /srcpart/ds=2008-04-08/hr=11 [b]
+        /srcpart/ds=2008-04-08/hr=12 [b]
+        /srcpart/ds=2008-04-09/hr=11 [b]
+        /srcpart/ds=2008-04-09/hr=12 [b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Outer Join 0 to 1
+          filter mappings:
+            1 [0, 1]
+          filter predicates:
+            0 
+            1 {(VALUE.ds = '2008-04-08')}
+          keys:
+            0 key (type: string)
+            1 key (type: string)
+          outputColumnNames: key, value, key0, value0
+          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
+            outputColumnNames: key, value, key0, value0
+            Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key0) > 15.0) and (UDFToDouble(key0) < 25.0)) (type: boolean)
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    properties:
+                      columns key,value,key0,value0
+                      columns.types string:string:string:string
+                      escape.delim \
+                      hive.serialization.extend.additional.nesting.levels true
+                      serialization.escape.crlf true
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key AND b.ds = '2008-04-08')
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+17	val_17	17	val_17
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+19	val_19	19	val_19
+PREHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN EXTENDED
+ FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: key, value
+                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: key (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: key (type: string)
+                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                  tag: 0
+                  value expressions: value (type: string)
+                  auto parallelism: false
+          TableScan
+            alias: b
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: key, value
+                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: key (type: string)
+                  null sort order: a
+                  sort order: +
+                  Map-reduce partition columns: key (type: string)
+                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                  tag: 1
+                  value expressions: value (type: string)
+                  auto parallelism: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: src
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.src
+              numFiles 1
+              numRows 500
+              rawDataSize 5312
+              serialization.ddl struct src { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.src
+                numFiles 1
+                numRows 500
+                rawDataSize 5312
+                serialization.ddl struct src { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src
+            name: default.src
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /src [a]
+        /srcpart/ds=2008-04-08/hr=11 [b]
+        /srcpart/ds=2008-04-08/hr=12 [b]
+      Needs Tagging: true
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Right Outer Join0 to 1
+          keys:
+            0 key (type: string)
+            1 key (type: string)
+          outputColumnNames: key, value, key0, value0
+          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), key0 (type: string), value0 (type: string)
+            outputColumnNames: key, value, key0, value0
+            Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              isSamplingPred: false
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    properties:
+                      columns key,value,key0,value0
+                      columns.types string:string:string:string
+                      escape.delim \
+                      hive.serialization.extend.additional.nesting.levels true
+                      serialization.escape.crlf true
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                TotalFiles: 1
+                GatherStats: false
+                MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: FROM 
+  src a
+ FULL OUTER JOIN 
+  srcpart b 
+ ON (a.key = b.key)
+ SELECT a.key, a.value, b.key, b.value
+ WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+17	val_17	17	val_17
+17	val_17	17	val_17
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+18	val_18	18	val_18
+19	val_19	19	val_19
+19	val_19	19	val_19

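The two EXPLAIN EXTENDED plans above differ only in where the partition predicate b.ds = '2008-04-08' sits, and the golden output records the consequence. With the predicate inside the ON clause of the FULL OUTER JOIN, it cannot prune partitions: "Truncated Path -> Alias" lists all four srcpart partitions (ds=2008-04-08 and ds=2008-04-09) and the predicate survives only as a residual "filter predicates" entry on the Join Operator. With the predicate moved to the WHERE clause, the non-null-preserving filters on both sides let the planner degrade the join to a Right Outer Join and prune the scan to the two ds=2008-04-08 partitions; both variants return the same twelve rows. A minimal way to reproduce the comparison yourself (a sketch, assuming the standard src/srcpart tables from Hive's qtest setup):

  -- Variant 1: partition predicate in the ON clause; no pruning, the
  -- predicate becomes a residual join filter over all four partitions.
  EXPLAIN EXTENDED
  SELECT a.key, a.value, b.key, b.value
  FROM src a
  FULL OUTER JOIN srcpart b
    ON (a.key = b.key AND b.ds = '2008-04-08')
  WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25;

  -- Variant 2: partition predicate in the WHERE clause; the join is
  -- simplified to a right outer join and only ds=2008-04-08 is scanned.
  EXPLAIN EXTENDED
  SELECT a.key, a.value, b.key, b.value
  FROM src a
  FULL OUTER JOIN srcpart b
    ON (a.key = b.key)
  WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    AND b.ds = '2008-04-08';
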
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/char_udf1.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/char_udf1.q.java1.7.out b/ql/src/test/results/clientpositive/char_udf1.q.java1.7.out
deleted file mode 100644
index ee1c2ae..0000000
--- a/ql/src/test/results/clientpositive/char_udf1.q.java1.7.out
+++ /dev/null
@@ -1,463 +0,0 @@
-PREHOOK: query: drop table char_udf_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table char_udf_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_udf_1
-PREHOOK: query: insert overwrite table char_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: insert overwrite table char_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@char_udf_1
-POSTHOOK: Lineage: char_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with char support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with char support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238val_238	238val_238                    	true
-PREHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-VAL_238	VAL_238             	true
-PREHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238             	true
-PREHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-118	118	true
-PREHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238|val_238	238|val_238	true
-PREHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  instr(c2, '_'),
-  instr(c4, '_'),
-  instr(c2, '_') = instr(c4, '_')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  instr(c2, '_'),
-  instr(c4, '_'),
-  instr(c2, '_') = instr(c4, '_')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-4	4	true
-PREHOOK: query: select
-  length(c2),
-  length(c4),
-  length(c2) = length(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  length(c2),
-  length(c4),
-  length(c2) = length(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-7	7	true
-PREHOOK: query: select
-  locate('a', 'abcdabcd', 3),
-  locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
-  locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  locate('a', 'abcdabcd', 3),
-  locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
-  locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-5	5	true
-PREHOOK: query: select
-  lpad(c2, 15, ' '),
-  lpad(c4, 15, ' '),
-  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  lpad(c2, 15, ' '),
-  lpad(c4, 15, ' '),
-  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-        val_238	        val_238	true
-PREHOOK: query: select
-  ltrim(c2),
-  ltrim(c4),
-  ltrim(c2) = ltrim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  ltrim(c2),
-  ltrim(c4),
-  ltrim(c2) = ltrim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: -- In hive wiki page https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF
--- we only allow A regexp B, not regexp (A,B).
-
-select
-  c2 regexp 'val',
-  c4 regexp 'val',
-  (c2 regexp 'val') = (c4 regexp 'val')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- In hive wiki page https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF
--- we only allow A regexp B, not regexp (A,B).
-
-select
-  c2 regexp 'val',
-  c4 regexp 'val',
-  (c2 regexp 'val') = (c4 regexp 'val')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-true	true	true
-PREHOOK: query: select
-  regexp_extract(c2, 'val_([0-9]+)', 1),
-  regexp_extract(c4, 'val_([0-9]+)', 1),
-  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp_extract(c2, 'val_([0-9]+)', 1),
-  regexp_extract(c4, 'val_([0-9]+)', 1),
-  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238	238	true
-PREHOOK: query: select
-  regexp_replace(c2, 'val', 'replaced'),
-  regexp_replace(c4, 'val', 'replaced'),
-  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp_replace(c2, 'val', 'replaced'),
-  regexp_replace(c4, 'val', 'replaced'),
-  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-replaced_238	replaced_238	true
-PREHOOK: query: select
-  reverse(c2),
-  reverse(c4),
-  reverse(c2) = reverse(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  reverse(c2),
-  reverse(c4),
-  reverse(c2) = reverse(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-832_lav	832_lav	true
-PREHOOK: query: select
-  rpad(c2, 15, ' '),
-  rpad(c4, 15, ' '),
-  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  rpad(c2, 15, ' '),
-  rpad(c4, 15, ' '),
-  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238        	val_238        	true
-PREHOOK: query: select
-  rtrim(c2),
-  rtrim(c4),
-  rtrim(c2) = rtrim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  rtrim(c2),
-  rtrim(c4),
-  rtrim(c2) = rtrim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  sentences('See spot run.  See jane run.'),
-  sentences(cast('See spot run.  See jane run.' as char(50)))
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  sentences('See spot run.  See jane run.'),
-  sentences(cast('See spot run.  See jane run.' as char(50)))
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-[["See","spot","run"],["See","jane","run"]]	[["See","spot","run"],["See","jane","run"]]
-PREHOOK: query: select
-  split(c2, '_'),
-  split(c4, '_')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  split(c2, '_'),
-  split(c4, '_')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-["val","238"]	["val","238"]
-PREHOOK: query: select 
-  str_to_map('a:1,b:2,c:3',',',':'),
-  str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select 
-  str_to_map('a:1,b:2,c:3',',',':'),
-  str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-{"b":"2","a":"1","c":"3"}	{"b":"2","a":"1","c":"3"}
-PREHOOK: query: select
-  substr(c2, 1, 3),
-  substr(c4, 1, 3),
-  substr(c2, 1, 3) = substr(c4, 1, 3)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  substr(c2, 1, 3),
-  substr(c4, 1, 3),
-  substr(c2, 1, 3) = substr(c4, 1, 3)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val	val	true
-PREHOOK: query: select
-  trim(c2),
-  trim(c4),
-  trim(c2) = trim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  trim(c2),
-  trim(c4),
-  trim(c2) = trim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: -- Aggregate Functions
-select
-  compute_stats(c2, 16),
-  compute_stats(c4, 16)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Aggregate Functions
-select
-  compute_stats(c2, 16),
-  compute_stats(c4, 16)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}	{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}
-PREHOOK: query: select
-  min(c2),
-  min(c4)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  min(c2),
-  min(c4)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238             
-PREHOOK: query: select
-  max(c2),
-  max(c4)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  max(c2),
-  max(c4)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238             
-PREHOOK: query: drop table char_udf_1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@char_udf_1
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: drop table char_udf_1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@char_udf_1
-POSTHOOK: Output: default@char_udf_1


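The file deleted above, char_udf1.q.java1.7.out, is one of the Java-version-specific golden files dropped by HIVE-13549 (the test itself carries the -- JAVA_VERSION_SPECIFIC_OUTPUT marker); with Hive2 building only against Java 8, the java1.7 variants are obsolete. Every query in the removed file follows one pattern: run a string UDF over a STRING column and over its CHAR(n) twin, then assert the results compare equal even though CHAR(n) values display with trailing-space padding (e.g. '238val_238                    '). A sketch of that pattern, using the same table the removed test created:

  -- Same value stored as STRING (c2) and CHAR(20) (c4).
  CREATE TABLE char_udf_1 (c1 STRING, c2 STRING, c3 CHAR(10), c4 CHAR(20));
  INSERT OVERWRITE TABLE char_udf_1
    SELECT key, value, key, value FROM src WHERE key = '238' LIMIT 1;

  -- The recurring assertion: UDF(string) = UDF(char) despite the padding.
  SELECT upper(c2), upper(c4), upper(c2) = upper(c4)
  FROM char_udf_1 LIMIT 1;
  -- Expected: VAL_238 / 'VAL_238             ' / true
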
[38/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
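
The diff below removes the matching java1.8 golden file for subquery_multiinsert, a multi-insert from src whose two branches exercise a correlated IN subquery (into src_4) and a NOT IN subquery with ORDER BY (into src_5). The plans it recorded show how Hive guards NOT IN against NULLs: Stage-10 counts subquery keys that are NULL (the sq_2_notin_nullcheck branch), keeps the insert only when that count is 0, and cross-joins the at-most-one-row result against src, which is what the "Shuffle Join ... is a cross product" warnings flag. A sketch of the guard the plan materializes (three-valued logic: NOT IN over a set containing a NULL returns no rows):

  -- Stage-10's check, stated as a standalone query: if any qualifying key
  -- is NULL, the NOT IN branch must produce zero rows.
  SELECT count(*) AS null_keys
  FROM src s1
  WHERE s1.key > '2' AND s1.key IS NULL;   -- branch survives only when 0
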
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out b/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
deleted file mode 100644
index 899723f..0000000
--- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.8.out
+++ /dev/null
@@ -1,999 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_4
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_4
-RUN: Stage-0:DDL
-PREHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_5
-POSTHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_5
-RUN: Stage-0:DDL
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-10 is a root stage
-  Stage-2 depends on stages: Stage-10
-  Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
-  Stage-6 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-6
-  Stage-7 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-10
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '2') and key is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: 0 (type: bigint)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), value (type: string)
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Semi Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-          TableScan
-            alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key > '2') (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col5
-          Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col5 is null (type: boolean)
-            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.src_5
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_5
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: key (type: string), value (type: string)
-              sort order: ++
-              Map-reduce partition columns: key (type: string), value (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '9') and value is not null) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Semi Join 0 to 1
-          keys:
-            0 key (type: string), value (type: string)
-            1 _col0 (type: string), _col1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.src_4
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_4
-
-  Stage: Stage-7
-    Stats-Aggr Operator
-
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_4
-PREHOOK: Output: default@src_5
-POSTHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_4
-POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-RUN: Stage-10:MAPRED
-RUN: Stage-2:MAPRED
-RUN: Stage-3:MAPRED
-RUN: Stage-6:MAPRED
-RUN: Stage-4:MAPRED
-RUN: Stage-0:MOVE
-RUN: Stage-1:MOVE
-RUN: Stage-7:STATS
-RUN: Stage-5:STATS
-PREHOOK: query: select * from src_4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_4
-#### A masked pattern was here ####
-90	val_90
-90	val_90
-90	val_90
-92	val_92
-95	val_95
-95	val_95
-96	val_96
-97	val_97
-97	val_97
-98	val_98
-98	val_98
-PREHOOK: query: select * from src_5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_5
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_5
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-105	val_105
-11	val_11
-111	val_111
-113	val_113
-113	val_113
-114	val_114
-116	val_116
-118	val_118
-118	val_118
-119	val_119
-119	val_119
-119	val_119
-12	val_12
-12	val_12
-120	val_120
-120	val_120
-125	val_125
-125	val_125
-126	val_126
-128	val_128
-128	val_128
-128	val_128
-129	val_129
-129	val_129
-131	val_131
-133	val_133
-134	val_134
-134	val_134
-136	val_136
-137	val_137
-137	val_137
-138	val_138
-138	val_138
-138	val_138
-138	val_138
-143	val_143
-145	val_145
-146	val_146
-146	val_146
-149	val_149
-149	val_149
-15	val_15
-15	val_15
-150	val_150
-152	val_152
-152	val_152
-153	val_153
-155	val_155
-156	val_156
-157	val_157
-158	val_158
-160	val_160
-162	val_162
-163	val_163
-164	val_164
-164	val_164
-165	val_165
-165	val_165
-166	val_166
-167	val_167
-167	val_167
-167	val_167
-168	val_168
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-17	val_17
-170	val_170
-172	val_172
-172	val_172
-174	val_174
-174	val_174
-175	val_175
-175	val_175
-176	val_176
-176	val_176
-177	val_177
-178	val_178
-179	val_179
-179	val_179
-18	val_18
-18	val_18
-180	val_180
-181	val_181
-183	val_183
-186	val_186
-187	val_187
-187	val_187
-187	val_187
-189	val_189
-19	val_19
-190	val_190
-191	val_191
-191	val_191
-192	val_192
-193	val_193
-193	val_193
-193	val_193
-194	val_194
-195	val_195
-195	val_195
-196	val_196
-197	val_197
-197	val_197
-199	val_199
-199	val_199
-199	val_199
-2	val_2
-Warning: Map Join MAPJOIN[55][bigTable=b] in task 'Stage-13:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-10 is a root stage
-  Stage-14 depends on stages: Stage-10 , consists of Stage-17, Stage-2
-  Stage-17 has a backup stage: Stage-2
-  Stage-13 depends on stages: Stage-17
-  Stage-15 depends on stages: Stage-2, Stage-13
-  Stage-4 depends on stages: Stage-15
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
-  Stage-16 depends on stages: Stage-2, Stage-13
-  Stage-12 depends on stages: Stage-16
-  Stage-0 depends on stages: Stage-12
-  Stage-7 depends on stages: Stage-0
-  Stage-2
-
-STAGE PLANS:
-  Stage: Stage-10
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '2') and key is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: 0 (type: bigint)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-14
-    Conditional Operator
-
-  Stage: Stage-17
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        $INTNAME 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        $INTNAME 
-          TableScan
-            HashTable Sink Operator
-              keys:
-                0 
-                1 
-
-  Stage: Stage-13
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Map Join Operator
-              condition map:
-                   Left Semi Join 0 to 1
-              keys:
-                0 
-                1 
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-15
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        sq_2:s1 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        sq_2:s1 
-          TableScan
-            alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key > '2') (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                HashTable Sink Operator
-                  keys:
-                    0 _col0 (type: string)
-                    1 _col0 (type: string)
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Map Join Operator
-              condition map:
-                   Left Outer Join0 to 1
-              keys:
-                0 _col0 (type: string)
-                1 _col0 (type: string)
-              outputColumnNames: _col0, _col1, _col5
-              Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: _col5 is null (type: boolean)
-                Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: string)
-      Local Work:
-        Map Reduce Local Work
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.src_5
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_5
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
-  Stage: Stage-16
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        sq_1:a 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        sq_1:a 
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '9') and value is not null) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  HashTable Sink Operator
-                    keys:
-                      0 key (type: string), value (type: string)
-                      1 _col0 (type: string), _col1 (type: string)
-
-  Stage: Stage-12
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Map Join Operator
-              condition map:
-                   Left Semi Join 0 to 1
-              keys:
-                0 key (type: string), value (type: string)
-                1 _col0 (type: string), _col1 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src_4
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_4
-
-  Stage: Stage-7
-    Stats-Aggr Operator
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), value (type: string)
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Semi Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-Warning: Map Join MAPJOIN[55][bigTable=b] in task 'Stage-13:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_4
-PREHOOK: Output: default@src_5
-POSTHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_4
-POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-RUN: Stage-10:MAPRED
-RUN: Stage-14:CONDITIONAL
-RUN: Stage-17:MAPREDLOCAL
-RUN: Stage-13:MAPRED
-RUN: Stage-15:MAPREDLOCAL
-RUN: Stage-16:MAPREDLOCAL
-RUN: Stage-4:MAPRED
-RUN: Stage-12:MAPRED
-RUN: Stage-1:MOVE
-RUN: Stage-0:MOVE
-RUN: Stage-5:STATS
-RUN: Stage-7:STATS
-PREHOOK: query: select * from src_4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_4
-#### A masked pattern was here ####
-90	val_90
-90	val_90
-90	val_90
-92	val_92
-95	val_95
-95	val_95
-96	val_96
-97	val_97
-97	val_97
-98	val_98
-98	val_98
-PREHOOK: query: select * from src_5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_5
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_5
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-105	val_105
-11	val_11
-111	val_111
-113	val_113
-113	val_113
-114	val_114
-116	val_116
-118	val_118
-118	val_118
-119	val_119
-119	val_119
-119	val_119
-12	val_12
-12	val_12
-120	val_120
-120	val_120
-125	val_125
-125	val_125
-126	val_126
-128	val_128
-128	val_128
-128	val_128
-129	val_129
-129	val_129
-131	val_131
-133	val_133
-134	val_134
-134	val_134
-136	val_136
-137	val_137
-137	val_137
-138	val_138
-138	val_138
-138	val_138
-138	val_138
-143	val_143
-145	val_145
-146	val_146
-146	val_146
-149	val_149
-149	val_149
-15	val_15
-15	val_15
-150	val_150
-152	val_152
-152	val_152
-153	val_153
-155	val_155
-156	val_156
-157	val_157
-158	val_158
-160	val_160
-162	val_162
-163	val_163
-164	val_164
-164	val_164
-165	val_165
-165	val_165
-166	val_166
-167	val_167
-167	val_167
-167	val_167
-168	val_168
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-17	val_17
-170	val_170
-172	val_172
-172	val_172
-174	val_174
-174	val_174
-175	val_175
-175	val_175
-176	val_176
-176	val_176
-177	val_177
-178	val_178
-179	val_179
-179	val_179
-18	val_18
-18	val_18
-180	val_180
-181	val_181
-183	val_183
-186	val_186
-187	val_187
-187	val_187
-187	val_187
-189	val_189
-19	val_19
-190	val_190
-191	val_191
-191	val_191
-192	val_192
-193	val_193
-193	val_193
-193	val_193
-194	val_194
-195	val_195
-195	val_195
-196	val_196
-197	val_197
-197	val_197
-199	val_199
-199	val_199
-199	val_199
-2	val_2

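The sq_2_notin_nullcheck stages in the plan above show how Hive decorrelates a NOT IN subquery: Stage-10 counts NULL keys coming out of the subquery (NOT IN can only return rows when that count is 0), Stage-2 joins every row of src against that single-row check (hence the cross-product warning), and the remaining stages turn the NOT IN itself into a left outer join filtered on IS NULL. A minimal HiveQL sketch of the rewrite, assuming standard NOT IN semantics; the aliases nullcheck, null_cnt and sq_2 are illustrative and do not appear in the plan:

  SELECT b.key, b.value
  FROM src b
  CROSS JOIN (SELECT count(*) AS null_cnt           -- Stage-10: count NULL subquery keys
              FROM src s1
              WHERE s1.key > '2' AND s1.key IS NULL) nullcheck
  LEFT OUTER JOIN (SELECT key FROM src s1           -- Stage-3: outer join on the subquery
                   WHERE s1.key > '2') sq_2
    ON b.key = sq_2.key
  WHERE nullcheck.null_cnt = 0                      -- NOT IN yields nothing if any subquery key is NULL
    AND sq_2.key IS NULL                            -- keep rows with no match
  ORDER BY b.key;

The null check is what preserves SQL semantics: if the subquery produced even one NULL, b.key NOT IN (...) could never be true, so the whole branch must return no rows.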
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
new file mode 100644
index 0000000..ff3abc4
--- /dev/null
+++ b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out
@@ -0,0 +1,997 @@
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE src_4(
+  key STRING, 
+  value STRING
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_4
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+CREATE TABLE src_4(
+  key STRING, 
+  value STRING
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_4
+RUN: Stage-0:DDL
+PREHOOK: query: CREATE TABLE src_5( 
+  key STRING, 
+  value STRING
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_5
+POSTHOOK: query: CREATE TABLE src_5( 
+  key STRING, 
+  value STRING
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_5
+RUN: Stage-0:DDL
+Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: explain
+from src b 
+INSERT OVERWRITE TABLE src_4 
+  select * 
+  where b.key in 
+   (select a.key 
+    from src a 
+    where b.value = a.value and a.key > '9'
+   ) 
+INSERT OVERWRITE TABLE src_5 
+  select *  
+  where b.key not in  ( select key from src s1 where s1.key > '2') 
+  order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src b 
+INSERT OVERWRITE TABLE src_4 
+  select * 
+  where b.key in 
+   (select a.key 
+    from src a 
+    where b.value = a.value and a.key > '9'
+   ) 
+INSERT OVERWRITE TABLE src_5 
+  select *  
+  where b.key not in  ( select key from src s1 where s1.key > '2') 
+  order by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-10 is a root stage
+  Stage-2 depends on stages: Stage-10
+  Stage-3 depends on stages: Stage-2
+  Stage-4 depends on stages: Stage-3
+  Stage-1 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-1
+  Stage-6 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-6
+  Stage-7 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-10
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: s1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key > '2') and key is null) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: (_col0 = 0) (type: boolean)
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: 0 (type: bigint)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              value expressions: key (type: string), value (type: string)
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Semi Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: string)
+          TableScan
+            alias: s1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key > '2') (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col5
+          Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: _col5 is null (type: boolean)
+            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: string)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.src_5
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_5
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: key (type: string), value (type: string)
+              sort order: ++
+              Map-reduce partition columns: key (type: string), value (type: string)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key > '9') and value is not null) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: string)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Semi Join 0 to 1
+          keys:
+            0 key (type: string), value (type: string)
+            1 _col0 (type: string), _col1 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.src_4
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_4
+
+  Stage: Stage-7
+    Stats-Aggr Operator
+
+Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: from src b 
+INSERT OVERWRITE TABLE src_4 
+  select * 
+  where b.key in 
+   (select a.key 
+    from src a 
+    where b.value = a.value and a.key > '9'
+   ) 
+INSERT OVERWRITE TABLE src_5 
+  select *  
+  where b.key not in  ( select key from src s1 where s1.key > '2') 
+  order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_4
+PREHOOK: Output: default@src_5
+POSTHOOK: query: from src b 
+INSERT OVERWRITE TABLE src_4 
+  select * 
+  where b.key in 
+   (select a.key 
+    from src a 
+    where b.value = a.value and a.key > '9'
+   ) 
+INSERT OVERWRITE TABLE src_5 
+  select *  
+  where b.key not in  ( select key from src s1 where s1.key > '2') 
+  order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_4
+POSTHOOK: Output: default@src_5
+POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+RUN: Stage-10:MAPRED
+RUN: Stage-2:MAPRED
+RUN: Stage-3:MAPRED
+RUN: Stage-6:MAPRED
+RUN: Stage-4:MAPRED
+RUN: Stage-0:MOVE
+RUN: Stage-1:MOVE
+RUN: Stage-7:STATS
+RUN: Stage-5:STATS
+PREHOOK: query: select * from src_4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_4
+#### A masked pattern was here ####
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+95	val_95
+95	val_95
+96	val_96
+97	val_97
+97	val_97
+98	val_98
+98	val_98
+PREHOOK: query: select * from src_5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_5
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_5
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+104	val_104
+104	val_104
+105	val_105
+11	val_11
+111	val_111
+113	val_113
+113	val_113
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+119	val_119
+119	val_119
+119	val_119
+12	val_12
+12	val_12
+120	val_120
+120	val_120
+125	val_125
+125	val_125
+126	val_126
+128	val_128
+128	val_128
+128	val_128
+129	val_129
+129	val_129
+131	val_131
+133	val_133
+134	val_134
+134	val_134
+136	val_136
+137	val_137
+137	val_137
+138	val_138
+138	val_138
+138	val_138
+138	val_138
+143	val_143
+145	val_145
+146	val_146
+146	val_146
+149	val_149
+149	val_149
+15	val_15
+15	val_15
+150	val_150
+152	val_152
+152	val_152
+153	val_153
+155	val_155
+156	val_156
+157	val_157
+158	val_158
+160	val_160
+162	val_162
+163	val_163
+164	val_164
+164	val_164
+165	val_165
+165	val_165
+166	val_166
+167	val_167
+167	val_167
+167	val_167
+168	val_168
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+17	val_17
+170	val_170
+172	val_172
+172	val_172
+174	val_174
+174	val_174
+175	val_175
+175	val_175
+176	val_176
+176	val_176
+177	val_177
+178	val_178
+179	val_179
+179	val_179
+18	val_18
+18	val_18
+180	val_180
+181	val_181
+183	val_183
+186	val_186
+187	val_187
+187	val_187
+187	val_187
+189	val_189
+19	val_19
+190	val_190
+191	val_191
+191	val_191
+192	val_192
+193	val_193
+193	val_193
+193	val_193
+194	val_194
+195	val_195
+195	val_195
+196	val_196
+197	val_197
+197	val_197
+199	val_199
+199	val_199
+199	val_199
+2	val_2
+Warning: Map Join MAPJOIN[55][bigTable=b] in task 'Stage-13:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: explain
+from src b 
+INSERT OVERWRITE TABLE src_4 
+  select * 
+  where b.key in 
+   (select a.key 
+    from src a 
+    where b.value = a.value and a.key > '9'
+   ) 
+INSERT OVERWRITE TABLE src_5 
+  select *  
+  where b.key not in  ( select key from src s1 where s1.key > '2') 
+  order by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+from src b 
+INSERT OVERWRITE TABLE src_4 
+  select * 
+  where b.key in 
+   (select a.key 
+    from src a 
+    where b.value = a.value and a.key > '9'
+   ) 
+INSERT OVERWRITE TABLE src_5 
+  select *  
+  where b.key not in  ( select key from src s1 where s1.key > '2') 
+  order by key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-10 is a root stage
+  Stage-14 depends on stages: Stage-10 , consists of Stage-17, Stage-2
+  Stage-17 has a backup stage: Stage-2
+  Stage-13 depends on stages: Stage-17
+  Stage-15 depends on stages: Stage-2, Stage-13
+  Stage-4 depends on stages: Stage-15
+  Stage-1 depends on stages: Stage-4
+  Stage-5 depends on stages: Stage-1
+  Stage-16 depends on stages: Stage-2, Stage-13
+  Stage-12 depends on stages: Stage-16
+  Stage-0 depends on stages: Stage-12
+  Stage-7 depends on stages: Stage-0
+  Stage-2
+
+STAGE PLANS:
+  Stage: Stage-10
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: s1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key > '2') and key is null) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: (_col0 = 0) (type: boolean)
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: 0 (type: bigint)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-14
+    Conditional Operator
+
+  Stage: Stage-17
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        $INTNAME 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        $INTNAME 
+          TableScan
+            HashTable Sink Operator
+              keys:
+                0 
+                1 
+
+  Stage: Stage-13
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Map Join Operator
+              condition map:
+                   Left Semi Join 0 to 1
+              keys:
+                0 
+                1 
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-15
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        sq_2:s1 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        sq_2:s1 
+          TableScan
+            alias: s1
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key > '2') (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                HashTable Sink Operator
+                  keys:
+                    0 _col0 (type: string)
+                    1 _col0 (type: string)
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Map Join Operator
+              condition map:
+                   Left Outer Join0 to 1
+              keys:
+                0 _col0 (type: string)
+                1 _col0 (type: string)
+              outputColumnNames: _col0, _col1, _col5
+              Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+              Filter Operator
+                predicate: _col5 is null (type: boolean)
+                Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: string)
+      Local Work:
+        Map Reduce Local Work
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.src_5
+
+  Stage: Stage-1
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_5
+
+  Stage: Stage-5
+    Stats-Aggr Operator
+
+  Stage: Stage-16
+    Map Reduce Local Work
+      Alias -> Map Local Tables:
+        sq_1:a 
+          Fetch Operator
+            limit: -1
+      Alias -> Map Local Operator Tree:
+        sq_1:a 
+          TableScan
+            alias: a
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key > '9') and value is not null) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                  HashTable Sink Operator
+                    keys:
+                      0 key (type: string), value (type: string)
+                      1 _col0 (type: string), _col1 (type: string)
+
+  Stage: Stage-12
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Map Join Operator
+              condition map:
+                   Left Semi Join 0 to 1
+              keys:
+                0 key (type: string), value (type: string)
+                1 _col0 (type: string), _col1 (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src_4
+      Local Work:
+        Map Reduce Local Work
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.src_4
+
+  Stage: Stage-7
+    Stats-Aggr Operator
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              value expressions: key (type: string), value (type: string)
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Semi Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+Warning: Map Join MAPJOIN[55][bigTable=b] in task 'Stage-13:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: from src b 
+INSERT OVERWRITE TABLE src_4 
+  select * 
+  where b.key in 
+   (select a.key 
+    from src a 
+    where b.value = a.value and a.key > '9'
+   ) 
+INSERT OVERWRITE TABLE src_5 
+  select *  
+  where b.key not in  ( select key from src s1 where s1.key > '2') 
+  order by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@src_4
+PREHOOK: Output: default@src_5
+POSTHOOK: query: from src b 
+INSERT OVERWRITE TABLE src_4 
+  select * 
+  where b.key in 
+   (select a.key 
+    from src a 
+    where b.value = a.value and a.key > '9'
+   ) 
+INSERT OVERWRITE TABLE src_5 
+  select *  
+  where b.key not in  ( select key from src s1 where s1.key > '2') 
+  order by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@src_4
+POSTHOOK: Output: default@src_5
+POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+RUN: Stage-10:MAPRED
+RUN: Stage-14:CONDITIONAL
+RUN: Stage-17:MAPREDLOCAL
+RUN: Stage-13:MAPRED
+RUN: Stage-15:MAPREDLOCAL
+RUN: Stage-16:MAPREDLOCAL
+RUN: Stage-4:MAPRED
+RUN: Stage-12:MAPRED
+RUN: Stage-1:MOVE
+RUN: Stage-0:MOVE
+RUN: Stage-5:STATS
+RUN: Stage-7:STATS
+PREHOOK: query: select * from src_4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_4
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_4
+#### A masked pattern was here ####
+90	val_90
+90	val_90
+90	val_90
+92	val_92
+95	val_95
+95	val_95
+96	val_96
+97	val_97
+97	val_97
+98	val_98
+98	val_98
+PREHOOK: query: select * from src_5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_5
+#### A masked pattern was here ####
+POSTHOOK: query: select * from src_5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_5
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+104	val_104
+104	val_104
+105	val_105
+11	val_11
+111	val_111
+113	val_113
+113	val_113
+114	val_114
+116	val_116
+118	val_118
+118	val_118
+119	val_119
+119	val_119
+119	val_119
+12	val_12
+12	val_12
+120	val_120
+120	val_120
+125	val_125
+125	val_125
+126	val_126
+128	val_128
+128	val_128
+128	val_128
+129	val_129
+129	val_129
+131	val_131
+133	val_133
+134	val_134
+134	val_134
+136	val_136
+137	val_137
+137	val_137
+138	val_138
+138	val_138
+138	val_138
+138	val_138
+143	val_143
+145	val_145
+146	val_146
+146	val_146
+149	val_149
+149	val_149
+15	val_15
+15	val_15
+150	val_150
+152	val_152
+152	val_152
+153	val_153
+155	val_155
+156	val_156
+157	val_157
+158	val_158
+160	val_160
+162	val_162
+163	val_163
+164	val_164
+164	val_164
+165	val_165
+165	val_165
+166	val_166
+167	val_167
+167	val_167
+167	val_167
+168	val_168
+169	val_169
+169	val_169
+169	val_169
+169	val_169
+17	val_17
+170	val_170
+172	val_172
+172	val_172
+174	val_174
+174	val_174
+175	val_175
+175	val_175
+176	val_176
+176	val_176
+177	val_177
+178	val_178
+179	val_179
+179	val_179
+18	val_18
+18	val_18
+180	val_180
+181	val_181
+183	val_183
+186	val_186
+187	val_187
+187	val_187
+187	val_187
+189	val_189
+19	val_19
+190	val_190
+191	val_191
+191	val_191
+192	val_192
+193	val_193
+193	val_193
+193	val_193
+194	val_194
+195	val_195
+195	val_195
+196	val_196
+197	val_197
+197	val_197
+199	val_199
+199	val_199
+199	val_199
+2	val_2

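Relative to the first EXPLAIN, this second plan differs only in physical join strategy: the Conditional Operator in Stage-14 picks a map join (Stage-17 builds a local hash table, Stage-13 streams table b through it) and keeps the original shuffle join (Stage-2) as the backup stage. This variant is presumably produced by enabling map-join conversion between the two runs; the actual settings lines are not part of this excerpt, so the values below are a hypothetical sketch:

  -- Hypothetical session settings for the map-join variant of the plan above.
  set hive.auto.convert.join=true;                 -- let the planner convert common joins to map joins
  set hive.mapjoin.smalltable.filesize=25000000;   -- max size of the small side kept in memory

Because the semi join against the one-row null check has no join keys, the converted map join is still a cross product, which is why the MAPJOIN[55] warning appears alongside the original shuffle-join warning.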

[50/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
deleted file mode 100644
index 1960d41..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.8.out
+++ /dev/null
@@ -1,1119 +0,0 @@
-PREHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed columns. merge.
--- The following explains the merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge, each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 000000_0
--- 155 000001_0
--- with merge
--- 254 000000_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 000000_0
--- 99 000001_0
--- with merge
--- 142 000001_0
--- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 000000_0
--- 5181 000001_0
--- with merge
--- 5181 000000_0
--- 5181 000001_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 000000_0
--- 87 000001_0
--- with merge
--- 118 000002_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed columns. merge.
--- The following explains the merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge, each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 000000_0
--- 155 000001_0
--- with merge
--- 254 000000_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 000000_0
--- 99 000001_0
--- with merge
--- 142 000001_0
--- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 000000_0
--- 5181 000001_0
--- with merge
--- 5181 000000_0
--- 5181 000001_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 000000_0
--- 87 000001_0
--- with merge
--- 118 000002_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
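The comment block above quantifies the merge behavior this test exercises: partition hr=a1 goes from 2 files to 1 and hr=b1 from 6 files to 4 once merging is enabled. The test presumably toggles the standard small-file merge settings between its two DML runs; those lines fall outside this diff, so the following is a hypothetical sketch:

  -- Hypothetical settings for the "with merge" run described in the comments above.
  set hive.merge.mapfiles=true;                    -- merge small files from map-only jobs
  set hive.merge.mapredfiles=true;                 -- merge small files from map-reduce jobs
  set hive.exec.dynamic.partition=true;            -- the DML writes partition hr dynamically
  set hive.exec.dynamic.partition.mode=nonstrict;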
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_dynamic_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-         TOK_SELEXPR
-            TOK_FUNCTION
-               if
-               ==
-                  %
-                     TOK_TABLE_OR_COL
-                        key
-                     100
-                  0
-               'a1'
-               'b1'
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_dynamic_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_dynamic_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-ds=2008-04-08/hr=a1
-ds=2008-04-08/hr=b1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, a1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	2                   
-	numRows             	16                  
-	rawDataSize         	136                 
-	totalSize           	310                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, b1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	6                   
-	numRows             	984                 
-	rawDataSize         	9488                
-	totalSize           	10734               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_dynamic_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-         TOK_SELEXPR
-            TOK_FUNCTION
-               if
-               ==
-                  %
-                     TOK_TABLE_OR_COL
-                        key
-                     100
-                  0
-               'a1'
-               'b1'
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
-  Stage-4
-  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
-  Stage-2 depends on stages: Stage-0
-  Stage-3
-  Stage-5
-  Stage-6 depends on stages: Stage-5
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_dynamic_part
-                      partition_columns hr
-                      partition_columns.types string
-                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_dynamic_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-7
-    Conditional Operator
-
-  Stage: Stage-4
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns hr
-                partition_columns.types string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-  Stage: Stage-3
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              partition_columns hr
-              partition_columns.types string
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns hr
-                partition_columns.types string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-5
-    Merge File Operator
-      Map Operator Tree:
-          RCFile Merge Operator
-      merge level: block
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            properties:
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              partition_columns hr
-              partition_columns.types string
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns hr
-                partition_columns.types string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-
-  Stage: Stage-6
-    Move Operator
-      files:
-          hdfs directory: true
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-ds=2008-04-08/hr=a1
-ds=2008-04-08/hr=b1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, a1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	1                   
-	numRows             	16                  
-	rawDataSize         	136                 
-	totalSize           	254                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, b1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	984                 
-	rawDataSize         	9488                
-	totalSize           	10622               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_dynamic_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_dynamic_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_dynamic_part
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            =
-               TOK_TABLE_OR_COL
-                  key
-               '484'
-            =
-               TOK_TABLE_OR_COL
-                  value
-               'val_484'
-
-
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr a1
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 1
-              numRows 16
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 136
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 254
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr b1
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 4
-              numRows 984
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9488
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10622
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_dynamic_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-484	val_484	2008-04-08	b1
-484	val_484	2008-04-08	b1
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- clean up
-drop table list_bucketing_dynamic_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_dynamic_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part

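For context, the table exercised by the statements above can be reconstructed from the DESC FORMATTED output (skewed columns and values, RCFile storage, subdirectory layout); the sketch below is an assumption pieced together from that output, since the original DDL appears earlier in the test script, outside this excerpt:

    -- Assumed DDL, reconstructed from the DESC FORMATTED output above;
    -- the actual test script defines this table before the excerpt begins.
    CREATE TABLE list_bucketing_dynamic_part (key STRING, value STRING)
        PARTITIONED BY (ds STRING, hr STRING)
        SKEWED BY (key, value) ON (('484','val_484'), ('51','val_14'), ('103','val_103'))
        STORED AS DIRECTORIES
        STORED AS RCFILE;

    -- The DML under test: a dynamic-partition insert that sends rows with
    -- key % 100 == 0 to hr=a1 and all other rows to hr=b1; list bucketing
    -- then splits the skewed (key, value) pairs into their own subdirectories.
    INSERT OVERWRITE TABLE list_bucketing_dynamic_part PARTITION (ds = '2008-04-08', hr)
    SELECT key, value, IF(key % 100 == 0, 'a1', 'b1')
    FROM srcpart WHERE ds = '2008-04-08';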

[41/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
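
The diff below trims the ABSTRACT SYNTAX TREE section from the golden output of spark/outer_join_ppr.q; the query it describes can be read back out of the removed AST. A sketch, assuming the standard src/srcpart test tables:

    -- Reconstructed from the TOK_FULLOUTERJOIN AST removed in the diff below.
    EXPLAIN EXTENDED
     FROM src a FULL OUTER JOIN srcpart b
       ON (a.key = b.key AND b.ds = '2008-04-08')
     SELECT a.key, a.value, b.key, b.value
     WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25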
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
index 217fe76..dfa6ea5 100644
--- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out
@@ -20,90 +20,6 @@ EXPLAIN EXTENDED
  SELECT a.key, a.value, b.key, b.value
  WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_FULLOUTERJOIN
-         TOK_TABREF
-            TOK_TABNAME
-               src
-            a
-         TOK_TABREF
-            TOK_TABNAME
-               srcpart
-            b
-         AND
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     a
-                  key
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  key
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  ds
-               '2008-04-08'
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               value
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               value
-      TOK_WHERE
-         AND
-            AND
-               AND
-                  >
-                     .
-                        TOK_TABLE_OR_COL
-                           a
-                        key
-                     10
-                  <
-                     .
-                        TOK_TABLE_OR_COL
-                           a
-                        key
-                     20
-               >
-                  .
-                     TOK_TABLE_OR_COL
-                        b
-                     key
-                  15
-            <
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  key
-               25
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -112,7 +28,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Map 3 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Map 3 (PARTITION-LEVEL SORT, 4)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -121,14 +37,19 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
-                  Reduce Output Operator
-                    key expressions: key (type: string)
-                    sort order: +
-                    Map-reduce partition columns: key (type: string)
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    tag: 0
-                    value expressions: value (type: string)
-                    auto parallelism: false
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      tag: 0
+                      value expressions: _col1 (type: string)
+                      auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -138,7 +59,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE true
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -158,7 +79,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE true
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -184,14 +105,19 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
-                  Reduce Output Operator
-                    key expressions: key (type: string)
-                    sort order: +
-                    Map-reduce partition columns: key (type: string)
+                  Select Operator
+                    expressions: key (type: string), value (type: string), ds (type: string)
+                    outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    tag: 1
-                    value expressions: value (type: string), ds (type: string)
-                    auto parallelism: false
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      null sort order: a
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      tag: 1
+                      value expressions: _col1 (type: string), _col2 (type: string)
+                      auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -204,7 +130,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE true
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -250,7 +176,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE true
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -296,7 +222,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE true
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -342,7 +268,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE true
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -396,39 +322,36 @@ STAGE PLANS:
                   0 
                   1 {(VALUE._col1 = '2008-04-08')}
                 keys:
-                  0 key (type: string)
-                  1 key (type: string)
-                outputColumnNames: _col0, _col1, _col5, _col6
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   isSamplingPred: false
-                  predicate: ((((_col5 > 15) and (_col5 < 25)) and (_col0 > 10)) and (_col0 < 20)) (type: boolean)
+                  predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0) and (UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean)
                   Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
                     Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          properties:
-                            columns _col0,_col1,_col2,_col3
-                            columns.types string:string:string:string
-                            escape.delim \
-                            hive.serialization.extend.additional.nesting.levels true
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      TotalFiles: 1
-                      GatherStats: false
-                      MultiFileSpray: false
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
 
   Stage: Stage-0
     Fetch Operator
@@ -496,90 +419,6 @@ POSTHOOK: query: EXPLAIN EXTENDED
  SELECT a.key, a.value, b.key, b.value
  WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
 POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_FULLOUTERJOIN
-         TOK_TABREF
-            TOK_TABNAME
-               src
-            a
-         TOK_TABREF
-            TOK_TABNAME
-               srcpart
-            b
-         =
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               value
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               value
-      TOK_WHERE
-         AND
-            AND
-               AND
-                  AND
-                     >
-                        .
-                           TOK_TABLE_OR_COL
-                              a
-                           key
-                        10
-                     <
-                        .
-                           TOK_TABLE_OR_COL
-                              a
-                           key
-                        20
-                  >
-                     .
-                        TOK_TABLE_OR_COL
-                           b
-                        key
-                     15
-               <
-                  .
-                     TOK_TABLE_OR_COL
-                        b
-                     key
-                  25
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  ds
-               '2008-04-08'
-
-
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
   Stage-0 depends on stages: Stage-1
@@ -588,7 +427,7 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Map 3 (PARTITION-LEVEL SORT, 4)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -597,14 +436,23 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
-                  Reduce Output Operator
-                    key expressions: key (type: string)
-                    sort order: +
-                    Map-reduce partition columns: key (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    tag: 0
-                    value expressions: value (type: string)
-                    auto parallelism: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
+                        tag: 0
+                        value expressions: _col1 (type: string)
+                        auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -614,7 +462,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE true
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -634,7 +482,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE true
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -658,16 +506,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: b
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
-                  Reduce Output Operator
-                    key expressions: key (type: string)
-                    sort order: +
-                    Map-reduce partition columns: key (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    tag: 1
-                    value expressions: value (type: string), ds (type: string)
-                    auto parallelism: false
+                  Filter Operator
+                    isSamplingPred: false
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
+                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        null sort order: a
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
+                        tag: 1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -680,7 +537,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE true
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -726,99 +583,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-09
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-09
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE true
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -858,48 +623,43 @@ STAGE PLANS:
             Truncated Path -> Alias:
               /srcpart/ds=2008-04-08/hr=11 [b]
               /srcpart/ds=2008-04-08/hr=12 [b]
-              /srcpart/ds=2008-04-09/hr=11 [b]
-              /srcpart/ds=2008-04-09/hr=12 [b]
         Reducer 2 
             Needs Tagging: true
             Reduce Operator Tree:
               Join Operator
                 condition map:
-                     Outer Join 0 to 1
+                     Right Outer Join0 to 1
                 keys:
-                  0 key (type: string)
-                  1 key (type: string)
-                outputColumnNames: _col0, _col1, _col5, _col6, _col7
-                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   isSamplingPred: false
-                  predicate: (((((_col5 > 15) and (_col5 < 25)) and (_col7 = '2008-04-08')) and (_col0 > 10)) and (_col0 < 20)) (type: boolean)
+                  predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean)
                   Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
                     Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          properties:
-                            columns _col0,_col1,_col2,_col3
-                            columns.types string:string:string:string
-                            escape.delim \
-                            hive.serialization.extend.additional.nesting.levels true
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      TotalFiles: 1
-                      GatherStats: false
-                      MultiFileSpray: false
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        properties:
+                          columns _col0,_col1,_col2,_col3
+                          columns.types string:string:string:string
+                          escape.delim \
+                          hive.serialization.extend.additional.nesting.levels true
+                          serialization.escape.crlf true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
 
   Stage: Stage-0
     Fetch Operator
@@ -919,8 +679,6 @@ PREHOOK: Input: default@src
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: FROM 
   src a
@@ -934,8 +692,6 @@ POSTHOOK: Input: default@src
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 17	val_17	17	val_17
 17	val_17	17	val_17

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.7.out b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.7.out
deleted file mode 100644
index b43ea5c..0000000
--- a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.7.out
+++ /dev/null
@@ -1,886 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_4
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_4
-RUN: Stage-0:DDL
-PREHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_5
-POSTHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_5
-RUN: Stage-0:DDL
-Warning: Shuffle Join JOIN[31][tables = [sq_2_notin_nullcheck]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-2 is a root stage
-  Stage-1 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-2
-    Spark
-      Edges:
-        Reducer 2 <- Map 10 (PARTITION-LEVEL SORT, 1), Reducer 9 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Map 7 (PARTITION-LEVEL SORT, 2), Reducer 2 (PARTITION-LEVEL SORT, 2)
-        Reducer 5 <- Map 11 (PARTITION-LEVEL SORT, 2), Map 6 (PARTITION-LEVEL SORT, 2)
-        Reducer 9 <- Map 8 (GROUP, 1)
-        Reducer 4 <- Reducer 3 (SORT, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 10 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: key (type: string), value (type: string)
-        Map 11 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: key (type: string), value (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: key (type: string), value (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-        Map 6 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key > '9') and value is not null) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: string)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-        Map 7 
-            Map Operator Tree:
-                TableScan
-                  alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key > '2') (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-        Map 8 
-            Map Operator Tree:
-                TableScan
-                  alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key > '2') and key is null) (type: boolean)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Left Semi Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Left Outer Join0 to 1
-                keys:
-                  0 _col0 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col5
-                Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: _col5 is null (type: boolean)
-                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.src_5
-        Reducer 5 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Left Semi Join 0 to 1
-                keys:
-                  0 key (type: string), value (type: string)
-                  1 _col0 (type: string), _col1 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.src_4
-        Reducer 9 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (_col0 = 0) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: 0 (type: bigint)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_5
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_4
-
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-Warning: Shuffle Join JOIN[31][tables = [sq_2_notin_nullcheck]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_4
-PREHOOK: Output: default@src_5
-POSTHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_4
-POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-RUN: Stage-2:MAPRED
-RUN: Stage-1:MOVE
-RUN: Stage-0:MOVE
-RUN: Stage-3:STATS
-RUN: Stage-4:STATS
-PREHOOK: query: select * from src_4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_4
-#### A masked pattern was here ####
-90	val_90
-90	val_90
-90	val_90
-92	val_92
-95	val_95
-95	val_95
-96	val_96
-97	val_97
-97	val_97
-98	val_98
-98	val_98
-PREHOOK: query: select * from src_5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_5
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_5
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-105	val_105
-11	val_11
-111	val_111
-113	val_113
-113	val_113
-114	val_114
-116	val_116
-118	val_118
-118	val_118
-119	val_119
-119	val_119
-119	val_119
-12	val_12
-12	val_12
-120	val_120
-120	val_120
-125	val_125
-125	val_125
-126	val_126
-128	val_128
-128	val_128
-128	val_128
-129	val_129
-129	val_129
-131	val_131
-133	val_133
-134	val_134
-134	val_134
-136	val_136
-137	val_137
-137	val_137
-138	val_138
-138	val_138
-138	val_138
-138	val_138
-143	val_143
-145	val_145
-146	val_146
-146	val_146
-149	val_149
-149	val_149
-15	val_15
-15	val_15
-150	val_150
-152	val_152
-152	val_152
-153	val_153
-155	val_155
-156	val_156
-157	val_157
-158	val_158
-160	val_160
-162	val_162
-163	val_163
-164	val_164
-164	val_164
-165	val_165
-165	val_165
-166	val_166
-167	val_167
-167	val_167
-167	val_167
-168	val_168
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-17	val_17
-170	val_170
-172	val_172
-172	val_172
-174	val_174
-174	val_174
-175	val_175
-175	val_175
-176	val_176
-176	val_176
-177	val_177
-178	val_178
-179	val_179
-179	val_179
-18	val_18
-18	val_18
-180	val_180
-181	val_181
-183	val_183
-186	val_186
-187	val_187
-187	val_187
-187	val_187
-189	val_189
-19	val_19
-190	val_190
-191	val_191
-191	val_191
-192	val_192
-193	val_193
-193	val_193
-193	val_193
-194	val_194
-195	val_195
-195	val_195
-196	val_196
-197	val_197
-197	val_197
-199	val_199
-199	val_199
-199	val_199
-2	val_2
-Warning: Map Join MAPJOIN[46][bigTable=b] in task 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-5 is a root stage
-  Stage-2 depends on stages: Stage-5
-  Stage-1 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-5
-    Spark
-      Edges:
-        Reducer 6 <- Map 5 (GROUP, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key > '9') and value is not null) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Spark HashTable Sink Operator
-                          keys:
-                            0 key (type: string), value (type: string)
-                            1 _col0 (type: string), _col1 (type: string)
-            Local Work:
-              Map Reduce Local Work
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key > '2') (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Spark HashTable Sink Operator
-                        keys:
-                          0 _col0 (type: string)
-                          1 _col0 (type: string)
-            Local Work:
-              Map Reduce Local Work
-        Map 5 
-            Map Operator Tree:
-                TableScan
-                  alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key > '2') and key is null) (type: boolean)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
-        Reducer 6 
-            Local Work:
-              Map Reduce Local Work
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (_col0 = 0) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: 0 (type: bigint)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      Spark HashTable Sink Operator
-                        keys:
-                          0 
-                          1 
-
-  Stage: Stage-2
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Map Join Operator
-                    condition map:
-                         Left Semi Join 0 to 1
-                    keys:
-                      0 
-                      1 
-                    outputColumnNames: _col0, _col1
-                    input vertices:
-                      1 Reducer 6
-                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                    Map Join Operator
-                      condition map:
-                           Left Outer Join0 to 1
-                      keys:
-                        0 _col0 (type: string)
-                        1 _col0 (type: string)
-                      outputColumnNames: _col0, _col1, _col5
-                      input vertices:
-                        1 Map 4
-                      Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                      Filter Operator
-                        predicate: _col5 is null (type: boolean)
-                        Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                        Select Operator
-                          expressions: _col0 (type: string), _col1 (type: string)
-                          outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            key expressions: _col0 (type: string)
-                            sort order: +
-                            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col1 (type: string)
-                  Map Join Operator
-                    condition map:
-                         Left Semi Join 0 to 1
-                    keys:
-                      0 key (type: string), value (type: string)
-                      1 _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col0, _col1
-                    input vertices:
-                      1 Map 3
-                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          name: default.src_4
-            Local Work:
-              Map Reduce Local Work
-        Reducer 2 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.src_5
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_5
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_4
-
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-Warning: Map Join MAPJOIN[46][bigTable=b] in task 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_4
-PREHOOK: Output: default@src_5
-POSTHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_4
-POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-RUN: Stage-5:MAPRED
-RUN: Stage-2:MAPRED
-RUN: Stage-1:MOVE
-RUN: Stage-0:MOVE
-RUN: Stage-3:STATS
-RUN: Stage-4:STATS
-PREHOOK: query: select * from src_4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_4
-#### A masked pattern was here ####
-90	val_90
-90	val_90
-90	val_90
-92	val_92
-95	val_95
-95	val_95
-96	val_96
-97	val_97
-97	val_97
-98	val_98
-98	val_98
-PREHOOK: query: select * from src_5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_5
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_5
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-105	val_105
-11	val_11
-111	val_111
-113	val_113
-113	val_113
-114	val_114
-116	val_116
-118	val_118
-118	val_118
-119	val_119
-119	val_119
-119	val_119
-12	val_12
-12	val_12
-120	val_120
-120	val_120
-125	val_125
-125	val_125
-126	val_126
-128	val_128
-128	val_128
-128	val_128
-129	val_129
-129	val_129
-131	val_131
-133	val_133
-134	val_134
-134	val_134
-136	val_136
-137	val_137
-137	val_137
-138	val_138
-138	val_138
-138	val_138
-138	val_138
-143	val_143
-145	val_145
-146	val_146
-146	val_146
-149	val_149
-149	val_149
-15	val_15
-15	val_15
-150	val_150
-152	val_152
-152	val_152
-153	val_153
-155	val_155
-156	val_156
-157	val_157
-158	val_158
-160	val_160
-162	val_162
-163	val_163
-164	val_164
-164	val_164
-165	val_165
-165	val_165
-166	val_166
-167	val_167
-167	val_167
-167	val_167
-168	val_168
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-17	val_17
-170	val_170
-172	val_172
-172	val_172
-174	val_174
-174	val_174
-175	val_175
-175	val_175
-176	val_176
-176	val_176
-177	val_177
-178	val_178
-179	val_179
-179	val_179
-18	val_18
-18	val_18
-180	val_180
-181	val_181
-183	val_183
-186	val_186
-187	val_187
-187	val_187
-187	val_187
-189	val_189
-19	val_19
-190	val_190
-191	val_191
-191	val_191
-192	val_192
-193	val_193
-193	val_193
-193	val_193
-194	val_194
-195	val_195
-195	val_195
-196	val_196
-197	val_197
-197	val_197
-199	val_199
-199	val_199
-199	val_199
-2	val_2


[20/66] [abbrv] hive git commit: HIVE-13799: Optimize TableScanRule::checkBucketedTable (Rajesh Balamohan via Gopal V)

Posted by sp...@apache.org.
HIVE-13799: Optimize TableScanRule::checkBucketedTable (Rajesh Balamohan via Gopal V)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8a679589
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8a679589
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8a679589

Branch: refs/heads/java8
Commit: 8a6795895f9c8cdeb729089873404d30f2b7e4b2
Parents: c229f99
Author: Rajesh Balamohan <rbalamohan at apache dot org>
Authored: Thu May 19 19:32:00 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue May 24 08:45:52 2016 -0700

----------------------------------------------------------------------
 .../metainfo/annotation/OpTraitsRulesProcFactory.java  | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8a679589/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
index 7149f5c..1e89016 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java
@@ -125,6 +125,11 @@ public class OpTraitsRulesProcFactory {
     public boolean checkBucketedTable(Table tbl, ParseContext pGraphContext,
         PrunedPartitionList prunedParts) throws SemanticException {
 
+      final int numBuckets = tbl.getNumBuckets();
+      if (numBuckets <= 0) {
+        return false;
+      }
+
       if (tbl.isPartitioned()) {
         List<Partition> partitions = prunedParts.getNotDeniedPartns();
         // construct a mapping of (Partition->bucket file names) and (Partition -> bucket number)
@@ -135,9 +140,7 @@ public class OpTraitsRulesProcFactory {
                     pGraphContext);
             // The number of files for the table should be same as number of
             // buckets.
-            int bucketCount = p.getBucketCount();
-
-            if (fileNames.size() != 0 && fileNames.size() != bucketCount) {
+            if (fileNames.size() != 0 && fileNames.size() != numBuckets) {
               return false;
             }
           }
@@ -147,10 +150,8 @@ public class OpTraitsRulesProcFactory {
         List<String> fileNames =
             AbstractBucketJoinProc.getBucketFilePathsOfPartition(tbl.getDataLocation(),
                 pGraphContext);
-        Integer num = new Integer(tbl.getNumBuckets());
-
         // The number of files for the table should be same as number of buckets.
-        if (fileNames.size() != 0 && fileNames.size() != num) {
+        if (fileNames.size() != 0 && fileNames.size() != numBuckets) {
           return false;
         }
       }
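
The change above reduces to one pattern: read the bucket count once, exit early for unbucketed tables, and compare primitive ints instead of allocating a boxed Integer on every call (the old `new Integer(tbl.getNumBuckets())`). Below is a minimal, self-contained Java sketch of that pattern; `TableLike` and its file-listing methods are simplified stand-ins for Hive's Table and AbstractBucketJoinProc APIs, not the real classes.

import java.util.List;

public class BucketCheckSketch {

  /** Simplified stand-in for the slice of table metadata the rule needs. */
  interface TableLike {
    int getNumBuckets();                        // <= 0 means the table is not bucketed
    boolean isPartitioned();
    List<List<String>> partitionBucketFiles();  // bucket files per pruned partition
    List<String> tableBucketFiles();            // bucket files of an unpartitioned table
  }

  static boolean checkBucketedTable(TableLike tbl) {
    // Hoisted once: the pre-patch code re-derived the bucket count per
    // partition and boxed it in the unpartitioned branch.
    final int numBuckets = tbl.getNumBuckets();
    if (numBuckets <= 0) {
      return false;  // early exit: no file listing needed for an unbucketed table
    }
    if (tbl.isPartitioned()) {
      for (List<String> fileNames : tbl.partitionBucketFiles()) {
        // A non-empty partition must have exactly one file per bucket.
        if (!fileNames.isEmpty() && fileNames.size() != numBuckets) {
          return false;
        }
      }
      return true;
    }
    List<String> fileNames = tbl.tableBucketFiles();
    return fileNames.isEmpty() || fileNames.size() == numBuckets;
  }
}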


[14/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
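
For the plan diffs that follow: per the commit title, Hive's rule-based ConstantPropagate pass is skipped once CBO has already optimized the plan, which is why these .q.out baselines change shape. A schematic Java sketch of that gating idea follows; `Plan`, `constantPropagate`, and `cboSucceeded` are hypothetical names for illustration, not Hive's actual internals.

final class OptimizerGateSketch {
  /** Hypothetical stand-in for an operator plan. */
  static final class Plan {
    final String description;
    Plan(String description) { this.description = description; }
  }

  /** Hypothetical stand-in for the rule-based constant-folding pass. */
  static Plan constantPropagate(Plan plan) {
    return new Plan(plan.description + " + constants folded");
  }

  /** Run the rule-based pass only when CBO did not already shape the plan. */
  static Plan optimize(Plan plan, boolean cboSucceeded) {
    return cboSucceeded ? plan : constantPropagate(plan);
  }
}
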
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
index 3b24a2e..387dfee 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out
@@ -1538,29 +1538,29 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col1, _col2
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col2)
-                keys: 'day' (type: string), _col1 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 'day' (type: string), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: 'day' (type: string), _col1 (type: string)
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: 'day' (type: string), KEY._col1 (type: string)
+          keys: KEY._col0 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), 'day' (type: string)
+            expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int), 'day' (type: string)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -1654,29 +1654,29 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col1, _col2
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col2)
-                keys: 'day' (type: string), _col1 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: 'day' (type: string), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: 'day' (type: string), _col1 (type: string)
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: 'day' (type: string), KEY._col1 (type: string)
+          keys: KEY._col0 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), 'day' (type: string)
+            expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int), 'day' (type: string)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/explain_logical.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out
index 5b8a422..3b6f32c 100644
--- a/ql/src/test/results/clientpositive/explain_logical.q.out
+++ b/ql/src/test/results/clientpositive/explain_logical.q.out
@@ -356,35 +356,34 @@ PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V4
 POSTHOOK: type: QUERY
 LOGICAL PLAN:
-$hdt$_0:srcpart 
+$hdt$_0:src 
   TableScan (TS_0)
-    alias: srcpart
-    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+    alias: src
+    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
     Filter Operator (FIL_15)
       predicate: key is not null (type: boolean)
-      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Select Operator (SEL_2)
-        expressions: key (type: string), value (type: string)
-        outputColumnNames: _col0, _col1
-        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+        expressions: key (type: string)
+        outputColumnNames: _col0
+        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reduce Output Operator (RS_9)
           key expressions: _col0 (type: string)
           sort order: +
           Map-reduce partition columns: _col0 (type: string)
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-          value expressions: _col1 (type: string)
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Join Operator (JOIN_12)
             condition map:
                  Inner Join 0 to 1
-                 Inner Join 0 to 2
+                 Inner Join 1 to 2
             keys:
               0 _col0 (type: string)
               1 _col0 (type: string)
               2 _col0 (type: string)
-            outputColumnNames: _col1, _col2, _col4
+            outputColumnNames: _col0, _col2, _col4
             Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE
             Select Operator (SEL_13)
-              expressions: _col2 (type: string), _col1 (type: string), _col4 (type: string)
+              expressions: _col0 (type: string), _col2 (type: string), _col4 (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE
               File Output Operator (FS_14)
@@ -394,31 +393,32 @@ $hdt$_0:srcpart
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-$hdt$_1:src 
+$hdt$_1:srcpart 
   TableScan (TS_3)
-    alias: src
-    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+    alias: srcpart
+    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
     Filter Operator (FIL_16)
       predicate: key is not null (type: boolean)
-      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
       Select Operator (SEL_5)
-        expressions: key (type: string)
-        outputColumnNames: _col0
-        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+        expressions: key (type: string), value (type: string)
+        outputColumnNames: _col0, _col1
+        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Reduce Output Operator (RS_10)
           key expressions: _col0 (type: string)
           sort order: +
           Map-reduce partition columns: _col0 (type: string)
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          value expressions: _col1 (type: string)
           Join Operator (JOIN_12)
             condition map:
                  Inner Join 0 to 1
-                 Inner Join 0 to 2
+                 Inner Join 1 to 2
             keys:
               0 _col0 (type: string)
               1 _col0 (type: string)
               2 _col0 (type: string)
-            outputColumnNames: _col1, _col2, _col4
+            outputColumnNames: _col0, _col2, _col4
             Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE
 $hdt$_2:src 
   TableScan (TS_6)
@@ -440,12 +440,12 @@ $hdt$_2:src
           Join Operator (JOIN_12)
             condition map:
                  Inner Join 0 to 1
-                 Inner Join 0 to 2
+                 Inner Join 1 to 2
             keys:
               0 _col0 (type: string)
               1 _col0 (type: string)
               2 _col0 (type: string)
-            outputColumnNames: _col1, _col2, _col4
+            outputColumnNames: _col0, _col2, _col4
             Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE
 
 PREHOOK: query: -- The table should show up in the explain logical even if none

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
index 132b590..46b701f 100644
--- a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
@@ -317,7 +317,7 @@ STAGE PLANS:
                Inner Join 0 to 1
           keys:
             0 UDFToDouble(_col0) (type: double), _col0 (type: string)
-            1 UDFToDouble(1) (type: double), _col0 (type: string)
+            1 1.0 (type: double), _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col5
           Statistics: Num rows: 22 Data size: 288 Basic stats: COMPLETE Column stats: NONE
           Filter Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/fold_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fold_case.q.out b/ql/src/test/results/clientpositive/fold_case.q.out
index 53139da..f57da79 100644
--- a/ql/src/test/results/clientpositive/fold_case.q.out
+++ b/ql/src/test/results/clientpositive/fold_case.q.out
@@ -267,18 +267,20 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              Group By Operator
-                aggregations: count(1)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
+            Select Operator
+              Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+              Filter Operator
+                predicate: false (type: boolean)
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count(1)
+                  mode: hash
+                  outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: bigint)
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -405,20 +407,22 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-              Select Operator
-                expressions: null (type: void)
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
+            Select Operator
+              Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+              Filter Operator
+                predicate: false (type: boolean)
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: null (type: void)
+                  outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -444,10 +448,10 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (not (key = '238')) (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              predicate: (key <> '238') (type: boolean)
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count(1)
                   mode: hash
@@ -497,7 +501,7 @@ STAGE PLANS:
           alias: src
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
-            expressions: null (type: void)
+            expressions: null (type: string)
             outputColumnNames: _col0
             Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
             ListSink
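
The fold_case hunks show three separate effects: a Select Operator is now
kept above the constant-false filter, the negated equality is printed as
(key <> '238') instead of (not (key = '238')) (equivalent predicates that
the estimator happens to score differently, hence the changed row counts),
and the null literal is typed to the column's string type rather than
void. A minimal sketch of a predicate that folds to constant false at
compile time (the actual fold_case.q queries are not reproduced here):

  -- both CASE branches are false, so the whole WHERE folds to false
  SELECT count(1) FROM src
  WHERE CASE WHEN key = '238' THEN false ELSE false END;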

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out b/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out
index 106ad6b..13f6ab4 100644
--- a/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out
+++ b/ql/src/test/results/clientpositive/fold_eq_with_case_when.q.out
@@ -173,10 +173,10 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: CASE WHEN ((key <> '238')) THEN ((key = '238')) ELSE ((key = '238')) END (type: boolean)
+              predicate: (key = '238') (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string)
+                expressions: '238' (type: string)
                 outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
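
Here the equality is distributed into the CASE: both branches reduce to
(key = '238'), so the whole expression folds to that comparison, and
constant propagation then substitutes the literal for key in the
projection. A sketch of the query shape, inferred from the printed
predicate rather than quoted from the test:

  -- CASE WHEN key <> '238' THEN key ELSE key END = '238' distributes to
  -- CASE WHEN (key <> '238') THEN (key = '238') ELSE (key = '238') END,
  -- which folds to key = '238'
  SELECT key FROM src
  WHERE (CASE WHEN key <> '238' THEN key ELSE key END) = '238';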

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/fold_when.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fold_when.q.out b/ql/src/test/results/clientpositive/fold_when.q.out
index 5b68408..4f3eb14 100644
--- a/ql/src/test/results/clientpositive/fold_when.q.out
+++ b/ql/src/test/results/clientpositive/fold_when.q.out
@@ -156,7 +156,7 @@ STAGE PLANS:
               predicate: (key = '238') (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string)
+                expressions: '238' (type: string)
                 outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -194,7 +194,7 @@ STAGE PLANS:
               predicate: (key = '238') (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string)
+                expressions: '238' (type: string)
                 outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
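
Both fold_when hunks make the same substitution: once the filter pins
key = '238', the downstream Select projects the literal instead of the
column. A sketch of a WHEN form that folds to that filter (the exact
fold_when.q queries are not reproduced here):

  -- folds to predicate (key = '238'); the projected key becomes '238'
  SELECT key FROM src
  WHERE CASE WHEN key = '238' THEN true ELSE false END;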

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/folder_predicate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/folder_predicate.q.out b/ql/src/test/results/clientpositive/folder_predicate.q.out
index 7fcc172..48a4889 100644
--- a/ql/src/test/results/clientpositive/folder_predicate.q.out
+++ b/ql/src/test/results/clientpositive/folder_predicate.q.out
@@ -37,15 +37,15 @@ STAGE PLANS:
             alias: predicate_fold_tb
             Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (value is null or (value <> 3)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+              predicate: (not (value is not null and (value = 3))) (type: boolean)
+              Statistics: Num rows: 3 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 3 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -88,15 +88,15 @@ STAGE PLANS:
             alias: predicate_fold_tb
             Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (value is null or (value < 3)) (type: boolean)
-              Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+              predicate: (not (value is not null and (value >= 3))) (type: boolean)
+              Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -137,15 +137,15 @@ STAGE PLANS:
             alias: predicate_fold_tb
             Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (value is null or (value > 3)) (type: boolean)
-              Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+              predicate: (not (value is not null and (value <= 3))) (type: boolean)
+              Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -186,15 +186,15 @@ STAGE PLANS:
             alias: predicate_fold_tb
             Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (value is null or (value <= 3)) (type: boolean)
-              Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+              predicate: (not (value is not null and (value > 3))) (type: boolean)
+              Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -236,15 +236,15 @@ STAGE PLANS:
             alias: predicate_fold_tb
             Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (value is null or (value >= 3)) (type: boolean)
-              Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+              predicate: (not (value is not null and (value < 3))) (type: boolean)
+              Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 5 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 4 Data size: 4 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -286,15 +286,15 @@ STAGE PLANS:
             alias: predicate_fold_tb
             Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (value is null or (value = 3)) (type: boolean)
-              Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+              predicate: (not (value is not null and (value <> 3))) (type: boolean)
+              Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -334,7 +334,7 @@ STAGE PLANS:
             alias: predicate_fold_tb
             Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (value is null or (value <= 1) or (value > 3)) (type: boolean)
+              predicate: (not (value is not null and (value > 1) and (value <= 3))) (type: boolean)
               Statistics: Num rows: 6 Data size: 7 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: int)
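
Every folder_predicate hunk swaps the expanded OR form for the equivalent
negated conjunction; only the statistics estimates differ, because the
estimator discounts the two shapes differently. The equivalence, spelled
out for the first hunk (predicate_fold_tb and value are taken from the
plan itself):

  -- NOT (value IS NOT NULL AND value = 3)
  --   <=>  value IS NULL OR NOT (value = 3)     (De Morgan)
  --   <=>  value IS NULL OR value <> 3
  SELECT value FROM predicate_fold_tb
  WHERE NOT (value IS NOT NULL AND value = 3);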

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/groupby_ppd.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_ppd.q.out b/ql/src/test/results/clientpositive/groupby_ppd.q.out
index 515f62e..d411a0d 100644
--- a/ql/src/test/results/clientpositive/groupby_ppd.q.out
+++ b/ql/src/test/results/clientpositive/groupby_ppd.q.out
@@ -32,20 +32,16 @@ STAGE PLANS:
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: int)
-                    outputColumnNames: _col1
+                  Group By Operator
+                    keys: _col0 (type: int)
+                    mode: hash
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    Group By Operator
-                      keys: 1 (type: int), _col1 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
                       Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: 1 (type: int), _col1 (type: int)
-                        sort order: ++
-                        Map-reduce partition columns: 1 (type: int), _col1 (type: int)
-                        Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           TableScan
             alias: c
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -58,28 +54,24 @@ STAGE PLANS:
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Union
                   Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: int)
-                    outputColumnNames: _col1
+                  Group By Operator
+                    keys: _col0 (type: int)
+                    mode: hash
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    Group By Operator
-                      keys: 1 (type: int), _col1 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: int)
                       Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: 1 (type: int), _col1 (type: int)
-                        sort order: ++
-                        Map-reduce partition columns: 1 (type: int), _col1 (type: int)
-                        Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: 1 (type: int), KEY._col1 (type: int)
+          keys: KEY._col0 (type: int)
           mode: mergepartial
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col1 (type: int), 1 (type: int)
+            expressions: _col0 (type: int), 1 (type: int)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
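
The groupby_ppd hunks drop the constant 1 from the group-by key on both
union branches and re-attach it in the final Select, so the shuffle keys
on the real column alone. A sketch of the query shape (the UNION and the
alias c come from the plan; table and column names are illustrative):

  -- the literal key adds nothing to the grouping, so only id is shuffled
  SELECT id, 1
  FROM (SELECT id FROM b UNION ALL SELECT id FROM c) u
  GROUP BY 1, id;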

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
index 7ef56fc..81fe0d9 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
@@ -1387,16 +1387,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col1
+              outputColumnNames: key
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
-                keys: 1 (type: int), _col1 (type: string)
+                aggregations: count(key)
+                keys: key (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: 1 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
+                  expressions: 1 (type: int), UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -1482,7 +1482,7 @@ STAGE PLANS:
               name: default.t1
             name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:t1]
+        /t1 [t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -1708,7 +1708,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
+POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM outputTbl3
@@ -1757,22 +1757,22 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: key, val
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
-                keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                aggregations: count(val)
+                keys: key (type: string), val (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), 1 (type: int), _col2 (type: string)
-                  null sort order: aaa
-                  sort order: +++
-                  Map-reduce partition columns: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  null sort order: aa
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   tag: -1
-                  value expressions: _col3 (type: bigint)
+                  value expressions: _col2 (type: bigint)
                   auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -1826,17 +1826,17 @@ STAGE PLANS:
               name: default.t1
             name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:t1]
+        /t1 [t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), 1 (type: int), KEY._col2 (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
+          outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+            expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -1912,7 +1912,7 @@ SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
@@ -4123,16 +4123,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: key, val
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
-                keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                aggregations: count(val)
+                keys: key (type: string), val (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -4218,7 +4218,7 @@ STAGE PLANS:
               name: default.t2
             name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:t2]
+        /t2 [t2]
 
   Stage: Stage-7
     Conditional Operator
@@ -4444,7 +4444,7 @@ SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -4502,16 +4502,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: key, val
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
-                keys: _col0 (type: string), 1 (type: int), _col2 (type: string), 2 (type: int)
+                aggregations: count(val)
+                keys: key (type: string), val (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), 2 (type: int), UDFToInteger(_col4) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), 2 (type: int), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -4597,7 +4597,7 @@ STAGE PLANS:
               name: default.t2
             name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:t2]
+        /t2 [t2]
 
   Stage: Stage-7
     Conditional Operator
@@ -4823,7 +4823,7 @@ SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
+POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -4878,16 +4878,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
-                keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                keys: _col0 (type: string), _col1 (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -5261,16 +5261,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
-                keys: _col0 (type: string), 2 (type: int), _col2 (type: string)
+                keys: _col0 (type: string), _col1 (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 2 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 2 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
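
Across groupby_sort_1_23, the constant keys (1, and 2 in the five-column
variant) are pruned from the Group By, count(1) becomes count(key) or
count(val) (which is why the lineage now records a FieldSchema instead of
null), and the plan keys directly on the table columns, dropping the
$hdt$ alias prefix. The driving queries appear in the POSTHOOK lines
above; the INSERT wrapper below is inferred from the Output: lines:

  INSERT OVERWRITE TABLE outputTbl3
  SELECT 1, key, count(1) FROM T1 GROUP BY 1, key;

  INSERT OVERWRITE TABLE outputTbl4
  SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;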

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
index 2819487..5cf0ea2 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
@@ -1453,16 +1453,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col1
+              outputColumnNames: key
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
-                keys: 1 (type: int), _col1 (type: string)
+                aggregations: count(key)
+                keys: key (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: 1 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
+                  expressions: 1 (type: int), UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -1548,7 +1548,7 @@ STAGE PLANS:
               name: default.t1
             name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:t1]
+        /t1 [t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -1774,7 +1774,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY 1, key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl3
-POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.null, ]
+POSTHOOK: Lineage: outputtbl3.cnt EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl3.key1 SIMPLE []
 POSTHOOK: Lineage: outputtbl3.key2 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM outputTbl3
@@ -1824,22 +1824,22 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: key, val
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
-                keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                aggregations: count(val)
+                keys: key (type: string), val (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), 1 (type: int), _col2 (type: string)
-                  null sort order: aaa
-                  sort order: +++
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  null sort order: aa
+                  sort order: ++
                   Map-reduce partition columns: rand() (type: double)
                   Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   tag: -1
-                  value expressions: _col3 (type: bigint)
+                  value expressions: _col2 (type: bigint)
                   auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -1893,14 +1893,14 @@ STAGE PLANS:
               name: default.t1
             name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:t1]
+        /t1 [t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), 1 (type: int), KEY._col2 (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
+          outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
@@ -1911,8 +1911,8 @@ STAGE PLANS:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                 properties:
-                  columns _col0,_col1,_col2,_col3
-                  columns.types string,int,string,bigint
+                  columns _col0,_col1,_col2
+                  columns.types string,string,bigint
                   escape.delim \
                   serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                 serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -1926,13 +1926,13 @@ STAGE PLANS:
           TableScan
             GatherStats: false
             Reduce Output Operator
-              key expressions: _col0 (type: string), 1 (type: int), _col2 (type: string)
-              null sort order: aaa
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), 1 (type: int), _col2 (type: string)
+              key expressions: _col0 (type: string), _col1 (type: string)
+              null sort order: aa
+              sort order: ++
+              Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               tag: -1
-              value expressions: _col3 (type: bigint)
+              value expressions: _col2 (type: bigint)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -1943,8 +1943,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col0,_col1,_col2,_col3
-              columns.types string,int,string,bigint
+              columns _col0,_col1,_col2
+              columns.types string,string,bigint
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -1952,8 +1952,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col0,_col1,_col2,_col3
-                columns.types string,int,string,bigint
+                columns _col0,_col1,_col2
+                columns.types string,string,bigint
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -1963,12 +1963,12 @@ STAGE PLANS:
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), 1 (type: int), KEY._col2 (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: final
-          outputColumnNames: _col0, _col1, _col2, _col3
+          outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+            expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -2044,7 +2044,7 @@ SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.null, ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
@@ -4585,16 +4585,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: key, val
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
-                keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                aggregations: count(val)
+                keys: key (type: string), val (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -4680,7 +4680,7 @@ STAGE PLANS:
               name: default.t2
             name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:t2]
+        /t2 [t2]
 
   Stage: Stage-7
     Conditional Operator
@@ -4906,7 +4906,7 @@ SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl4
-POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.null, ]
+POSTHOOK: Lineage: outputtbl4.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl4.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl4.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -4964,16 +4964,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: key, val
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
-                keys: _col0 (type: string), 1 (type: int), _col2 (type: string), 2 (type: int)
+                aggregations: count(val)
+                keys: key (type: string), val (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), 2 (type: int), UDFToInteger(_col4) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), 2 (type: int), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -5059,7 +5059,7 @@ STAGE PLANS:
               name: default.t2
             name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:t2]
+        /t2 [t2]
 
   Stage: Stage-7
     Conditional Operator
@@ -5285,7 +5285,7 @@ SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: default@outputtbl5
-POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
+POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
 POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
 POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
@@ -5340,16 +5340,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
-                keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
+                keys: _col0 (type: string), _col1 (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 1 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -5723,16 +5723,16 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col2
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
-                keys: _col0 (type: string), 2 (type: int), _col2 (type: string)
+                keys: _col0 (type: string), _col1 (type: string)
                 mode: final
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col0) (type: int), 2 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  expressions: UDFToInteger(_col0) (type: int), 2 (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
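
groupby_sort_skew_1_23 shows the same key pruning under the skewed
group-by plan: the first reduce stage partitions on rand() and a second
stage merges the partials, so the narrower key list also shrinks the
intermediate columns.types from string,int,string,bigint to
string,string,bigint. A sketch of how this plan shape is provoked
(hive.groupby.skewindata is the standard switch; the query is from the
POSTHOOK lines above):

  SET hive.groupby.skewindata=true;
  SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val;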

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/index_auto_unused.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_unused.q.out b/ql/src/test/results/clientpositive/index_auto_unused.q.out
index 81079f8..e476050 100644
--- a/ql/src/test/results/clientpositive/index_auto_unused.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_unused.q.out
@@ -356,25 +356,37 @@ PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-0 is a root stage
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            filterExpr: ((ds = '2008-04-09') and (12.0 = 12.0) and (UDFToDouble(key) < 10.0)) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (UDFToDouble(key) < 10.0) (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), value (type: string), '2008-04-09' (type: string), hr (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        TableScan
-          alias: srcpart
-          filterExpr: (UDFToDouble(key) < 10.0) (type: boolean)
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (UDFToDouble(key) < 10.0) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string), '2008-04-09' (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              ListSink
+        ListSink
 
 PREHOOK: query: SELECT * FROM srcpart WHERE ds='2008-04-09' AND hr=12 AND key < 10
 PREHOOK: type: QUERY
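
The index_auto_unused hunk trades the fetch-only plan for a full
map-reduce stage. Note the residual (12.0 = 12.0) in filterExpr: hr is a
string partition column, so hr = 12 is compared as double, and once the
partition is pruned the comparison degenerates into a folded tautology.
The query itself is quoted in the diff:

  -- hr = 12 casts both sides to double; key < 10 does the same for key
  SELECT * FROM srcpart
  WHERE ds = '2008-04-09' AND hr = 12 AND key < 10;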

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/infer_join_preds.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_join_preds.q.out b/ql/src/test/results/clientpositive/infer_join_preds.q.out
index 8afc905..38baed3 100644
--- a/ql/src/test/results/clientpositive/infer_join_preds.q.out
+++ b/ql/src/test/results/clientpositive/infer_join_preds.q.out
@@ -158,17 +158,17 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
@@ -178,10 +178,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 27 Data size: 210 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 27 Data size: 210 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -248,19 +248,16 @@ STAGE PLANS:
           TableScan
             alias: src1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -269,10 +266,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 27 Data size: 210 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 27 Data size: 210 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -549,19 +546,16 @@ STAGE PLANS:
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: false (type: boolean)
-              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col0 (type: string)
+                sort order: +
+                Map-reduce partition columns: _col0 (type: string)
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col1 (type: string)
           TableScan
             alias: src1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -586,10 +580,10 @@ STAGE PLANS:
             0 _col0 (type: string)
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/input23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input23.q.out b/ql/src/test/results/clientpositive/input23.q.out
index dcb2891..e03c9e7 100644
--- a/ql/src/test/results/clientpositive/input23.q.out
+++ b/ql/src/test/results/clientpositive/input23.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain extended
  select * from srcpart a join srcpart b where a.ds = '2008-04-08' and a.hr = '11' and b.ds = '2008-04-08' and b.hr = '14' limit 5
 PREHOOK: type: QUERY
@@ -144,7 +144,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * from srcpart a join srcpart b where a.ds = '2008-04-08' and a.hr = '11' and b.ds = '2008-04-08' and b.hr = '14' limit 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/input26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input26.q.out b/ql/src/test/results/clientpositive/input26.q.out
index 87b7081..74035c5 100644
--- a/ql/src/test/results/clientpositive/input26.q.out
+++ b/ql/src/test/results/clientpositive/input26.q.out
@@ -44,8 +44,8 @@ STAGE PLANS:
             Number of rows: 5
             Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col3
+              expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), '11' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
@@ -60,31 +60,23 @@ STAGE PLANS:
           TableScan
             Union
               Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), _col3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           TableScan
             Union
               Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), _col3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 6 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-3
     Map Reduce
@@ -116,8 +108,8 @@ STAGE PLANS:
             Number of rows: 5
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: string), '14' (type: string)
-              outputColumnNames: _col0, _col1, _col3
+              expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), '14' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               File Output Operator
                 compressed: false

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/input6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input6.q.out b/ql/src/test/results/clientpositive/input6.q.out
index 5ed2767..3d1a815 100644
--- a/ql/src/test/results/clientpositive/input6.q.out
+++ b/ql/src/test/results/clientpositive/input6.q.out
@@ -109,7 +109,7 @@ INSERT OVERWRITE TABLE dest1 SELECT src1.key, src1.value WHERE src1.key is null
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.key SIMPLE []
 POSTHOOK: Lineage: dest1.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/input8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input8.q.out b/ql/src/test/results/clientpositive/input8.q.out
index 03857fc..e2b6b4a 100644
--- a/ql/src/test/results/clientpositive/input8.q.out
+++ b/ql/src/test/results/clientpositive/input8.q.out
@@ -106,9 +106,9 @@ INSERT OVERWRITE TABLE dest1 SELECT 4 + NULL, src1.key - NULL, NULL + NULL
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
 POSTHOOK: Output: default@dest1
-POSTHOOK: Lineage: dest1.c1 EXPRESSION []
+POSTHOOK: Lineage: dest1.c1 SIMPLE []
 POSTHOOK: Lineage: dest1.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION []
+POSTHOOK: Lineage: dest1.c3 SIMPLE []
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/input_part10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part10.q.out b/ql/src/test/results/clientpositive/input_part10.q.out
index c8fb37e..8455bb3 100644
--- a/ql/src/test/results/clientpositive/input_part10.q.out
+++ b/ql/src/test/results/clientpositive/input_part10.q.out
@@ -45,6 +45,8 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 1 (type: int), 2 (type: int)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
               Limit
                 Number of rows: 1
@@ -53,9 +55,10 @@ STAGE PLANS:
                   sort order: 
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
                   TopN Hash Memory Usage: 0.1
+                  value expressions: _col0 (type: int), _col1 (type: int)
       Reduce Operator Tree:
         Select Operator
-          expressions: 1 (type: int), 2 (type: int)
+          expressions: VALUE._col0 (type: int), VALUE._col1 (type: int)
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
           Limit

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/insert_into5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_into5.q.out b/ql/src/test/results/clientpositive/insert_into5.q.out
index b9510b9..7b471f4 100644
--- a/ql/src/test/results/clientpositive/insert_into5.q.out
+++ b/ql/src/test/results/clientpositive/insert_into5.q.out
@@ -39,6 +39,8 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 1 (type: int), 'one' (type: string)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
               Limit
                 Number of rows: 10
@@ -47,9 +49,10 @@ STAGE PLANS:
                   sort order: 
                   Statistics: Num rows: 10 Data size: 910 Basic stats: COMPLETE Column stats: COMPLETE
                   TopN Hash Memory Usage: 0.1
+                  value expressions: _col0 (type: int), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: 1 (type: int), 'one' (type: string)
+          expressions: VALUE._col0 (type: int), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 10 Data size: 910 Basic stats: COMPLETE Column stats: COMPLETE
           Limit

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
index f7a9853..c114654 100644
--- a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
+++ b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out
@@ -53,7 +53,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@sample_06
 POSTHOOK: Output: default@tab1
 POSTHOOK: Lineage: tab1.age SIMPLE [(sample_06)sample_06.FieldSchema(name:age, type:int, comment:null), ]
-POSTHOOK: Lineage: tab1.gpa SIMPLE [(sample_06)sample_06.FieldSchema(name:gpa, type:decimal(3,2), comment:null), ]
+POSTHOOK: Lineage: tab1.gpa SIMPLE []
 POSTHOOK: Lineage: tab1.name SIMPLE [(sample_06)sample_06.FieldSchema(name:name, type:varchar(50), comment:null), ]
 PREHOOK: query: select * from tab1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join38.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join38.q.out b/ql/src/test/results/clientpositive/join38.q.out
index 7f76c5d..aca06b2 100644
--- a/ql/src/test/results/clientpositive/join38.q.out
+++ b/ql/src/test/results/clientpositive/join38.q.out
@@ -15,17 +15,17 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@tmp
 POSTHOOK: Lineage: tmp.col0 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col10 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col11 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col4 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col5 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col6 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col7 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col8 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: tmp.col9 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp.col1 SIMPLE []
+POSTHOOK: Lineage: tmp.col10 SIMPLE []
+POSTHOOK: Lineage: tmp.col11 SIMPLE []
+POSTHOOK: Lineage: tmp.col2 SIMPLE []
+POSTHOOK: Lineage: tmp.col3 SIMPLE []
+POSTHOOK: Lineage: tmp.col4 SIMPLE []
+POSTHOOK: Lineage: tmp.col5 SIMPLE []
+POSTHOOK: Lineage: tmp.col6 SIMPLE []
+POSTHOOK: Lineage: tmp.col7 SIMPLE []
+POSTHOOK: Lineage: tmp.col8 SIMPLE []
+POSTHOOK: Lineage: tmp.col9 SIMPLE []
 PREHOOK: query: select * from tmp
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tmp

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join42.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join42.q.out b/ql/src/test/results/clientpositive/join42.q.out
index 462e49e..57979b0 100644
--- a/ql/src/test/results/clientpositive/join42.q.out
+++ b/ql/src/test/results/clientpositive/join42.q.out
@@ -90,7 +90,7 @@ POSTHOOK: Output: default@acct
 POSTHOOK: Lineage: acct.acc_n EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
 POSTHOOK: Lineage: acct.aid EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
 POSTHOOK: Lineage: acct.brn EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ]
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: --[HIVE-10841] (WHERE col is not null) does not work sometimes for queries with many JOIN statements
 explain select
   acct.ACC_N,
@@ -294,7 +294,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select
   acct.ACC_N,
   acct.brn

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join8.q.out b/ql/src/test/results/clientpositive/join8.q.out
index d7e7cb1..82b7357 100644
--- a/ql/src/test/results/clientpositive/join8.q.out
+++ b/ql/src/test/results/clientpositive/join8.q.out
@@ -153,7 +153,7 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
 POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION []
 POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT dest1.* FROM dest1
 PREHOOK: type: QUERY


[32/66] [abbrv] hive git commit: HIVE-13029: NVDIMM support for LLAP Cache (Gopal V, reviewed by Sergey Shelukhin)

Posted by sp...@apache.org.
HIVE-13029: NVDIMM support for LLAP Cache (Gopal V, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dee45522
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dee45522
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dee45522

Branch: refs/heads/java8
Commit: dee45522934ae8eb9fbfdbd6df00fcc97377faed
Parents: ebc5e6a
Author: Gopal V <go...@apache.org>
Authored: Wed May 25 14:03:48 2016 -0700
Committer: Gopal V <go...@apache.org>
Committed: Wed May 25 14:03:48 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  8 +++
 .../org/apache/hadoop/hive/conf/Validator.java  | 29 ++++++++
 .../hadoop/hive/llap/cache/BuddyAllocator.java  | 75 ++++++++++++++++++--
 .../hadoop/hive/llap/cli/LlapServiceDriver.java | 15 ++--
 .../hive/llap/cache/TestBuddyAllocator.java     | 46 ++++++++----
 5 files changed, 150 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/dee45522/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index ed20069..3e295fe 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.conf;
 
 import com.google.common.base.Joiner;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hive.conf.Validator.RatioValidator;
 import org.apache.hadoop.hive.conf.Validator.SizeValidator;
 import org.apache.hadoop.hive.conf.Validator.StringSet;
 import org.apache.hadoop.hive.conf.Validator.TimeValidator;
+import org.apache.hadoop.hive.conf.Validator.WritableDirectoryValidator;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
@@ -2630,6 +2632,12 @@ public class HiveConf extends Configuration {
         "Maximum size for IO allocator or ORC low-level cache.", "hive.llap.io.cache.orc.size"),
     LLAP_ALLOCATOR_DIRECT("hive.llap.io.allocator.direct", true,
         "Whether ORC low-level cache should use direct allocation."),
+    LLAP_ALLOCATOR_MAPPED("hive.llap.io.allocator.mmap", false,
+        "Whether ORC low-level cache should use memory mapped allocation (direct I/O). \n" +
+        "This is recommended to be used along-side NVDIMM (DAX) or NVMe flash storage."),
+    LLAP_ALLOCATOR_MAPPED_PATH("hive.llap.io.allocator.mmap.path", "/tmp",
+        new WritableDirectoryValidator(),
+        "The directory location for mapping NVDIMM/NVMe flash storage into the ORC low-level cache."),
     LLAP_USE_LRFU("hive.llap.io.use.lrfu", true,
         "Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO)."),
     LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.01f,

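For context, the two new knobs above are read through the standard HiveConf accessors. A minimal sketch of enabling the mmap-backed cache programmatically — the /mnt/pmem mount point and the MmapConfSketch class name are purely illustrative, and the path must be a writable local directory (see the WritableDirectoryValidator below):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class MmapConfSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // mmap arenas are only meaningful for direct buffers (see BuddyAllocator below).
    conf.setBoolVar(ConfVars.LLAP_ALLOCATOR_DIRECT, true);
    conf.setBoolVar(ConfVars.LLAP_ALLOCATOR_MAPPED, true);
    // Illustrative DAX/NVMe mount point; must be a writable local directory.
    conf.setVar(ConfVars.LLAP_ALLOCATOR_MAPPED_PATH, "/mnt/pmem");
    System.out.println(conf.getVar(ConfVars.LLAP_ALLOCATOR_MAPPED_PATH));
  }
}
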
http://git-wip-us.apache.org/repos/asf/hive/blob/dee45522/common/src/java/org/apache/hadoop/hive/conf/Validator.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/Validator.java b/common/src/java/org/apache/hadoop/hive/conf/Validator.java
index bb8962a..00e7b28 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/Validator.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/Validator.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hive.conf;
 
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.LinkedHashSet;
@@ -346,4 +349,30 @@ public interface Validator {
       return current > 0 ? ((long)(size / current) + "Pb") : (size + units[0]);
     }
   }
+
+  public class WritableDirectoryValidator implements Validator {
+
+    @Override
+    public String validate(String value) {
+      final Path path = FileSystems.getDefault().getPath(value);
+      if (path == null && value != null) {
+        return String.format("Path '%s' provided could not be located.", value);
+      }
+      final boolean isDir = Files.isDirectory(path);
+      final boolean isWritable = Files.isWritable(path);
+      if (!isDir) {
+        return String.format("Path '%s' provided is not a directory.", value);
+      }
+      if (!isWritable) {
+        return String.format("Path '%s' provided is not writable.", value);
+      }
+      return null;
+    }
+
+    @Override
+    public String toDescription() {
+      return "Expects a writable directory on the local filesystem";
+    }
+  }
+
 }

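The validator contract here is: return null when the value is acceptable, or a human-readable rejection message otherwise. A hedged standalone sketch exercising the new class (the ValidatorSketch name and the paths are illustrative and platform-dependent):

import org.apache.hadoop.hive.conf.Validator.WritableDirectoryValidator;

public class ValidatorSketch {
  public static void main(String[] args) {
    WritableDirectoryValidator v = new WritableDirectoryValidator();
    // A writable directory passes: validate() returns null.
    System.out.println(v.validate(System.getProperty("java.io.tmpdir"))); // null
    // A plain file is rejected with a message.
    System.out.println(v.validate("/etc/hosts")); // "... is not a directory."
  }
}
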
http://git-wip-us.apache.org/repos/asf/hive/blob/dee45522/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
index 1d5a7db..47325ad 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/BuddyAllocator.java
@@ -18,9 +18,23 @@
 package org.apache.hadoop.hive.llap.cache;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel.MapMode;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.ReentrantLock;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -39,6 +53,8 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
   private final int minAllocation, maxAllocation, arenaSize;
   private final long maxSize;
   private final boolean isDirect;
+  private final boolean isMapped;
+  private final Path cacheDir;
   private final LlapDaemonCacheMetrics metrics;
 
   // We don't know the acceptable size for Java array, so we'll use 1Gb boundary.
@@ -47,13 +63,21 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
   // Don't try to operate with less than MIN_SIZE allocator space, it will just give you grief.
   private static final int MIN_TOTAL_MEMORY_SIZE = 64*1024*1024;
 
+  private static final FileAttribute<Set<PosixFilePermission>> RWX = PosixFilePermissions
+      .asFileAttribute(PosixFilePermissions.fromString("rwx------"));
+  private static final FileAttribute<Set<PosixFilePermission>> RW_ = PosixFilePermissions
+      .asFileAttribute(PosixFilePermissions.fromString("rw-------"));
+
 
   public BuddyAllocator(Configuration conf, MemoryManager mm, LlapDaemonCacheMetrics metrics) {
     this(HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOCATOR_DIRECT),
+        HiveConf.getBoolVar(conf, ConfVars.LLAP_ALLOCATOR_MAPPED),
         (int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MIN_ALLOC),
         (int)HiveConf.getSizeVar(conf, ConfVars.LLAP_ALLOCATOR_MAX_ALLOC),
         HiveConf.getIntVar(conf, ConfVars.LLAP_ALLOCATOR_ARENA_COUNT),
-        getMaxTotalMemorySize(conf), mm, metrics);
+        getMaxTotalMemorySize(conf), 
+        HiveConf.getVar(conf, ConfVars.LLAP_ALLOCATOR_MAPPED_PATH),
+        mm, metrics);
   }
 
   private static long getMaxTotalMemorySize(Configuration conf) {
@@ -71,14 +95,35 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
   @VisibleForTesting
   public BuddyAllocator(boolean isDirectVal, int minAllocVal, int maxAllocVal, int arenaCount,
       long maxSizeVal, MemoryManager memoryManager, LlapDaemonCacheMetrics metrics) {
+    this(isDirectVal, false /*isMapped*/,  minAllocVal, maxAllocVal, arenaCount, maxSizeVal, 
+        null /* mapping path */, memoryManager, metrics);
+  }
+
+  @VisibleForTesting
+  public BuddyAllocator(boolean isDirectVal, boolean isMappedVal, int minAllocVal, int maxAllocVal,
+      int arenaCount, long maxSizeVal, String mapPath, MemoryManager memoryManager,
+      LlapDaemonCacheMetrics metrics) {
     isDirect = isDirectVal;
+    isMapped = isMappedVal;
     minAllocation = minAllocVal;
     maxAllocation = maxAllocVal;
+    if (isMapped) {
+      try {
+        cacheDir =
+            Files.createTempDirectory(FileSystems.getDefault().getPath(mapPath), "llap-", RWX);
+      } catch (IOException ioe) {
+        // conf validator already checks this, so it will never trigger usually
+        throw new AssertionError("Configured mmap directory should be writable", ioe);
+      }
+    } else {
+      cacheDir = null;
+    }
     int arenaSizeVal = (arenaCount == 0) ? MAX_ARENA_SIZE : (int)(maxSizeVal / arenaCount);
     arenaSizeVal = Math.max(maxAllocation, Math.min(arenaSizeVal, MAX_ARENA_SIZE));
     if (LlapIoImpl.LOG.isInfoEnabled()) {
-      LlapIoImpl.LOG.info("Buddy allocator with " + (isDirect ? "direct" : "byte")
-          + " buffers; allocation sizes " + minAllocation + " - " + maxAllocation
+      LlapIoImpl.LOG.info("Buddy allocator with " + (isDirect ? "direct" : "byte") + " buffers;"
+          + (isMapped ? (" memory mapped off " + cacheDir.toString() + "; ") : "")
+          + "allocation sizes " + minAllocation + " - " + maxAllocation
           + ", arena size " + arenaSizeVal + ". total size " + maxSizeVal);
     }
 
@@ -288,6 +333,28 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
     return maxSize;
   }
 
+  private ByteBuffer preallocate(int arenaSize) {
+    if (isMapped) {
+      Preconditions.checkArgument(isDirect, "All memory mapped allocations have to be direct buffers");
+      try {
+        File rf = File.createTempFile("arena-", ".cache", cacheDir.toFile());
+        RandomAccessFile rwf = new RandomAccessFile(rf, "rw");
+        rwf.setLength(arenaSize); // truncate (TODO: posix_fallocate?)
+        ByteBuffer rwbuf = rwf.getChannel().map(MapMode.PRIVATE, 0, arenaSize);
+        // A mapping, once established, is not dependent upon the file channel that was used to
+        // create it. delete file and hold onto the map
+        rwf.close();
+        rf.delete();
+        return rwbuf;
+      } catch (IOException ioe) {
+        LlapIoImpl.LOG.warn("Failed trying to allocate memory mapped arena", ioe);
+        // fail similarly when memory allocations fail
+        throw new OutOfMemoryError("Failed trying to allocate memory mapped arena: " + ioe.getMessage());
+      }
+    }
+    return isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
+  }
+
   private class Arena {
     private ByteBuffer data;
     // Avoid storing headers with data since we expect binary size allocations.
@@ -297,7 +364,7 @@ public final class BuddyAllocator implements EvictionAwareAllocator, BuddyAlloca
 
     void init() {
       try {
-        data = isDirect ? ByteBuffer.allocateDirect(arenaSize) : ByteBuffer.allocate(arenaSize);
+        data = preallocate(arenaSize);
       } catch (OutOfMemoryError oom) {
         throw new OutOfMemoryError("Cannot allocate " + arenaSize + " bytes: " + oom.getMessage()
             + "; make sure your xmx and process size are set correctly.");

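The preallocate() change relies on the map-then-delete idiom: a MappedByteBuffer, once established, is independent of both the FileChannel that created it and the file's directory entry, so the arena file can be deleted immediately while the pages remain usable. A minimal JDK-only sketch of the same idiom, under the assumption of a POSIX filesystem (sizes and names are illustrative; Windows may refuse to delete a file while it is mapped):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel.MapMode;

public class MmapArenaSketch {
  public static void main(String[] args) throws IOException {
    final int arenaSize = 4 * 1024 * 1024; // illustrative 4MB arena
    File f = File.createTempFile("arena-", ".cache"); // lands in java.io.tmpdir
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    raf.setLength(arenaSize); // truncate/extend the backing file to arena size
    // PRIVATE gives copy-on-write pages: writes stay in memory, never reach the file.
    MappedByteBuffer buf = raf.getChannel().map(MapMode.PRIVATE, 0, arenaSize);
    raf.close();  // the mapping does not depend on the channel that created it...
    f.delete();   // ...nor on the directory entry; pages live until the buffer is GC'd
    buf.putInt(0, 42); // the buffer remains fully usable
    System.out.println(buf.getInt(0)); // 42
  }
}
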
http://git-wip-us.apache.org/repos/asf/hive/blob/dee45522/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
index c8fddb1..dc80992 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
@@ -200,16 +200,23 @@ public class LlapServiceDriver {
 
     if (options.getSize() != -1) {
       if (options.getCache() != -1) {
-        Preconditions.checkArgument(options.getCache() < options.getSize(),
-            "Cache has to be smaller than the container sizing");
+        if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MAPPED) == false) {
+          // direct heap allocations need to be safer
+          Preconditions.checkArgument(options.getCache() < options.getSize(),
+              "Cache has to be smaller than the container sizing");
+        } else if (options.getCache() < options.getSize()) {
+          LOG.warn("Note that this might need YARN physical memory monitoring to be turned off (yarn.nodemanager.pmem-check-enabled=false)");
+        }
       }
       if (options.getXmx() != -1) {
         Preconditions.checkArgument(options.getXmx() < options.getSize(),
             "Working memory has to be smaller than the container sizing");
       }
-      if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT)) {
+      if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_DIRECT)
+          && false == HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_ALLOCATOR_MAPPED)) {
+        // direct and not memory mapped
         Preconditions.checkArgument(options.getXmx() + options.getCache() < options.getSize(),
-            "Working memory + cache has to be smaller than the containing sizing ");
+            "Working memory + cache has to be smaller than the container sizing ");
       }
     }
 

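In short, the relaxed rule is: with mmap enabled the cache is file-backed, so only the JVM working memory must fit inside the container; without it, the cache (and, for direct buffers, heap plus cache together) must also fit. A hedged restatement of the checks as a standalone method (class and parameter names are illustrative):

import com.google.common.base.Preconditions;

public class SizingSketch {
  // Illustrative restatement of the driver's container-sizing checks.
  static void checkSizing(long container, long xmx, long cache,
      boolean direct, boolean mapped) {
    Preconditions.checkArgument(xmx < container,
        "Working memory has to be smaller than the container sizing");
    if (!mapped) {
      // A heap-resident cache must fit in the container...
      Preconditions.checkArgument(cache < container,
          "Cache has to be smaller than the container sizing");
      if (direct) {
        // ...and for direct buffers, heap plus cache must fit together.
        Preconditions.checkArgument(xmx + cache < container,
            "Working memory + cache has to be smaller than the container sizing");
      }
    }
    // When mapped, cache pages are file-backed; if cache >= container, YARN's
    // physical memory check may still need to be disabled (see the warning above).
  }
}
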
http://git-wip-us.apache.org/repos/asf/hive/blob/dee45522/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
index 7b04103..345f5b1 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestBuddyAllocator.java
@@ -18,8 +18,9 @@
 package org.apache.hadoop.hive.llap.cache;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
 
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
@@ -27,18 +28,33 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.FutureTask;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.io.Allocator.AllocatorOutOfMemoryException;
 import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+@RunWith(Parameterized.class)
 public class TestBuddyAllocator {
   private static final Logger LOG = LoggerFactory.getLogger(TestBuddyAllocator.class);
   private final Random rdm = new Random(2284);
+  private final boolean isDirect;
+  private final boolean isMapped;
+  private final String tmpDir = System.getProperty("java.io.tmpdir", ".");
+
+  @Parameters
+  public static Collection<Object[]> data() {
+    return Arrays.asList(new Object[][] { { false, false }, { true, false }, { true, true } });
+  }
+
+  public TestBuddyAllocator(boolean direct, boolean mmap) {
+    isDirect = direct;
+    isMapped = mmap;
+  }
 
   private static class DummyMemoryManager implements MemoryManager {
     @Override
@@ -78,8 +94,8 @@ public class TestBuddyAllocator {
   @Test
   public void testSameSizes() throws Exception {
     int min = 3, max = 8, maxAlloc = 1 << max;
-    BuddyAllocator a = new BuddyAllocator(false, 1 << min, maxAlloc, maxAlloc, maxAlloc,
-        new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
+    BuddyAllocator a = new BuddyAllocator(isDirect, isMapped, 1 << min, maxAlloc, maxAlloc, maxAlloc,
+        tmpDir, new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
     for (int i = max; i >= min; --i) {
       allocSameSize(a, 1 << (max - i), i);
     }
@@ -88,16 +104,16 @@ public class TestBuddyAllocator {
   @Test
   public void testMultipleArenas() throws Exception {
     int max = 8, maxAlloc = 1 << max, allocLog2 = max - 1, arenaCount = 5;
-    BuddyAllocator a = new BuddyAllocator(false, 1 << 3, maxAlloc, maxAlloc, maxAlloc * arenaCount,
-        new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
+    BuddyAllocator a = new BuddyAllocator(isDirect, isMapped, 1 << 3, maxAlloc, maxAlloc, maxAlloc * arenaCount,
+        tmpDir, new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
     allocSameSize(a, arenaCount * 2, allocLog2);
   }
 
   @Test
   public void testMTT() {
     final int min = 3, max = 8, maxAlloc = 1 << max, allocsPerSize = 3;
-    final BuddyAllocator a = new BuddyAllocator(false, 1 << min, maxAlloc, maxAlloc * 8,
-        maxAlloc * 24, new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
+    final BuddyAllocator a = new BuddyAllocator(isDirect, isMapped, 1 << min, maxAlloc, maxAlloc * 8,
+        maxAlloc * 24, tmpDir, new DummyMemoryManager(), LlapDaemonCacheMetrics.create("test", "1"));
     ExecutorService executor = Executors.newFixedThreadPool(3);
     final CountDownLatch cdlIn = new CountDownLatch(3), cdlOut = new CountDownLatch(1);
     FutureTask<Void> upTask = new FutureTask<Void>(new Callable<Void>() {
@@ -140,8 +156,8 @@ public class TestBuddyAllocator {
   @Test
   public void testMTTArenas() {
     final int min = 3, max = 4, maxAlloc = 1 << max, minAllocCount = 2048, threadCount = 4;
-    final BuddyAllocator a = new BuddyAllocator(false, 1 << min, maxAlloc, maxAlloc,
-        (1 << min) * minAllocCount, new DummyMemoryManager(),
+    final BuddyAllocator a = new BuddyAllocator(isDirect, isMapped, 1 << min, maxAlloc, maxAlloc,
+        (1 << min) * minAllocCount, tmpDir, new DummyMemoryManager(),
         LlapDaemonCacheMetrics.create("test", "1"));
     ExecutorService executor = Executors.newFixedThreadPool(threadCount);
     final CountDownLatch cdlIn = new CountDownLatch(threadCount), cdlOut = new CountDownLatch(1);
@@ -180,8 +196,8 @@ public class TestBuddyAllocator {
   private void testVariableSizeInternal(
       int allocCount, int arenaSizeMult, int arenaCount) throws Exception {
     int min = 3, max = 8, maxAlloc = 1 << max, arenaSize = maxAlloc * arenaSizeMult;
-    BuddyAllocator a = new BuddyAllocator(false, 1 << min, maxAlloc, arenaSize,
-        arenaSize * arenaCount, new DummyMemoryManager(),
+    BuddyAllocator a = new BuddyAllocator(isDirect, isMapped, 1 << min, maxAlloc, arenaSize,
+        arenaSize * arenaCount, tmpDir , new DummyMemoryManager(),
         LlapDaemonCacheMetrics.create("test", "1"));
     allocateUp(a, min, max, allocCount, true);
     allocateDown(a, min, max, allocCount, true);

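The test change uses the stock JUnit 4 Parameterized runner: each Object[] row returned by the @Parameters method becomes one constructor invocation, and the whole test class runs once per row — here covering heap, direct, and direct+mmap allocators. A self-contained sketch of the pattern outside the Hive tree (the class name and assertion are illustrative):

import java.util.Arrays;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

import static org.junit.Assert.assertTrue;

@RunWith(Parameterized.class)
public class AllocatorModeTest {
  private final boolean isDirect;
  private final boolean isMapped;

  // One test-class run per row; each row feeds the constructor below.
  @Parameters
  public static Collection<Object[]> data() {
    return Arrays.asList(new Object[][] { { false, false }, { true, false }, { true, true } });
  }

  public AllocatorModeTest(boolean direct, boolean mmap) {
    isDirect = direct;
    isMapped = mmap;
  }

  @Test
  public void mappedImpliesDirect() {
    // Mirrors the allocator invariant: mmap arenas must be direct buffers.
    assertTrue(!isMapped || isDirect);
  }
}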

[18/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan) (addendum)

Posted by sp...@apache.org.
HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan) (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eff6e058
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eff6e058
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eff6e058

Branch: refs/heads/java8
Commit: eff6e05823e31b1dcc896f2f4532db27d0ba5439
Parents: 10423f5
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue May 24 15:02:03 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Tue May 24 15:02:03 2016 +0100

----------------------------------------------------------------------
 .../clientpositive/autoColumnStats_5.q.out      | 36 ++++++++++----------
 .../clientpositive/autoColumnStats_6.q.out      | 10 +++---
 2 files changed, 23 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/eff6e058/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
index e905748..04ed3ce 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out
@@ -56,30 +56,30 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.partitioned1
               Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), 1 (type: int)
+                expressions: _col0 (type: int), _col1 (type: string), UDFToInteger('1') (type: int)
                 outputColumnNames: a, b, part
                 Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: compute_stats(a, 16), compute_stats(b, 16)
-                  keys: 1 (type: int)
+                  keys: part (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: 1 (type: int)
+                    key expressions: _col0 (type: int)
                     sort order: +
-                    Map-reduce partition columns: 1 (type: int)
+                    Map-reduce partition columns: _col0 (type: int)
                     Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
-          keys: 1 (type: int)
+          keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), 1 (type: int)
+            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -298,30 +298,30 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.partitioned1
               Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), 2 (type: int)
+                expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), UDFToInteger('2') (type: int)
                 outputColumnNames: a, b, c, d, part
                 Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: compute_stats(a, 16), compute_stats(b, 16), compute_stats(c, 16), compute_stats(d, 16)
-                  keys: 2 (type: int)
+                  keys: part (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: 2 (type: int)
+                    key expressions: _col0 (type: int)
                     sort order: +
-                    Map-reduce partition columns: 2 (type: int)
+                    Map-reduce partition columns: _col0 (type: int)
                     Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3)
-          keys: 2 (type: int)
+          keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), 2 (type: int)
+            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 1 Data size: 60 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -494,30 +494,30 @@ STAGE PLANS:
                     serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                     name: default.partitioned1
               Select Operator
-                expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), 1 (type: int)
+                expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: string), UDFToInteger('1') (type: int)
                 outputColumnNames: a, b, c, d, part
                 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: compute_stats(a, 16), compute_stats(b, 16), compute_stats(c, 16), compute_stats(d, 16)
-                  keys: 1 (type: int)
+                  keys: part (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: 1 (type: int)
+                    key expressions: _col0 (type: int)
                     sort order: +
-                    Map-reduce partition columns: 1 (type: int)
+                    Map-reduce partition columns: _col0 (type: int)
                     Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col2 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1), compute_stats(VALUE._col2), compute_stats(VALUE._col3)
-          keys: 1 (type: int)
+          keys: KEY._col0 (type: int)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), 1 (type: int)
+            expressions: _col1 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col2 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col0 (type: int)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/eff6e058/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_6.q.out b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
index de75d8a..29b3373 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_6.q.out
@@ -60,25 +60,25 @@ STAGE PLANS:
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: compute_stats(key, 16), compute_stats(value, 16)
-                  keys: '1' (type: string), two (type: string), three (type: string)
+                  keys: one (type: string), two (type: string), three (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: '1' (type: string), _col1 (type: string), _col2 (type: string)
+                    key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                     sort order: +++
-                    Map-reduce partition columns: '1' (type: string), _col1 (type: string), _col2 (type: string)
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col4 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
-          keys: '1' (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), '1' (type: string), _col1 (type: string), _col2 (type: string)
+            expressions: _col3 (type: struct<columntype:string,min:bigint,max:bigint,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col4 (type: struct<columntype:string,maxlength:bigint,avglength:double,countnulls:bigint,numdistinctvalues:bigint,ndvbitvector:string>), _col0 (type: string), _col1 (type: string), _col2 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator


[23/66] [abbrv] hive git commit: HIVE-13835 : TestMiniTezCliDriver.vector_complex_all.q needs golden file update

Posted by sp...@apache.org.
HIVE-13835 : TestMiniTezCliDriver.vector_complex_all.q needs golden file update


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4c5ca8bd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4c5ca8bd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4c5ca8bd

Branch: refs/heads/java8
Commit: 4c5ca8bd6705ee3e353cabef11cb2dd9c3df3283
Parents: 2d10398
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Tue May 24 15:31:33 2016 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue May 24 23:19:50 2016 -0700

----------------------------------------------------------------------
 .../test/results/clientpositive/tez/vector_complex_all.q.out   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4c5ca8bd/ql/src/test/results/clientpositive/tez/vector_complex_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_complex_all.q.out b/ql/src/test/results/clientpositive/tez/vector_complex_all.q.out
index 1d59a3d..04ff49f 100644
--- a/ql/src/test/results/clientpositive/tez/vector_complex_all.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_complex_all.q.out
@@ -111,9 +111,9 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@orc_create_complex
 #### A masked pattern was here ####
 orc_create_complex.str	orc_create_complex.mp	orc_create_complex.lst	orc_create_complex.strct
-line1	{"key11":"value11","key12":"value12","key13":"value13"}	["a","b","c"]	{"a":"one","b":"two"}
-line2	{"key21":"value21","key22":"value22","key23":"value23"}	["d","e","f"]	{"a":"three","b":"four"}
-line3	{"key31":"value31","key32":"value32","key33":"value33"}	["g","h","i"]	{"a":"five","b":"six"}
+line1	{"key13":"value13","key12":"value12","key11":"value11"}	["a","b","c"]	{"a":"one","b":"two"}
+line2	{"key21":"value21","key23":"value23","key22":"value22"}	["d","e","f"]	{"a":"three","b":"four"}
+line3	{"key33":"value33","key31":"value31","key32":"value32"}	["g","h","i"]	{"a":"five","b":"six"}
 PREHOOK: query: -- However, since this query is not referencing the complex fields, it should vectorize.
 EXPLAIN
 SELECT COUNT(*) FROM orc_create_complex


[29/66] [abbrv] hive git commit: HIVE-13845: Delete beeline/pom.xml.orig (Jesus Camacho Rodriguez, reviewed by Sergio Peña)

Posted by sp...@apache.org.
HIVE-13845: Delete beeline/pom.xml.orig (Jesus Camacho Rodriguez, reviewed by Sergio Peña)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f40f8bc9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f40f8bc9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f40f8bc9

Branch: refs/heads/java8
Commit: f40f8bc9be45cb6fb45104ff6ed6a9ad6784db6c
Parents: e22e3ad
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed May 25 17:47:55 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed May 25 17:47:55 2016 +0100

----------------------------------------------------------------------
 beeline/pom.xml.orig | 169 ----------------------------------------------
 1 file changed, 169 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f40f8bc9/beeline/pom.xml.orig
----------------------------------------------------------------------
diff --git a/beeline/pom.xml.orig b/beeline/pom.xml.orig
deleted file mode 100644
index 8ac83f5..0000000
--- a/beeline/pom.xml.orig
+++ /dev/null
@@ -1,169 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hive</groupId>
-    <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
-    <relativePath>../pom.xml</relativePath>
-  </parent>
-
-  <artifactId>hive-beeline</artifactId>
-  <packaging>jar</packaging>
-  <name>Hive Beeline</name>
-
-  <properties>
-    <hive.path.to.root>..</hive.path.to.root>
-  </properties>
-
-  <dependencies>
-    <!-- dependencies are always listed in sorted order by groupId, artifactId -->
-    <!-- intra-project -->
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-common</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-metastore</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-shims</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-jdbc</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <!-- inter-project -->
-    <dependency>
-      <groupId>commons-cli</groupId>
-      <artifactId>commons-cli</artifactId>
-      <version>${commons-cli.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-lang</groupId>
-      <artifactId>commons-lang</artifactId>
-      <version>${commons-lang.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>commons-io</groupId>
-      <artifactId>commons-io</artifactId>
-      <version>${commons-io.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>jline</groupId>
-      <artifactId>jline</artifactId>
-      <version>${jline.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hadoop.version}</version>
-      <optional>true</optional>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.thrift</groupId>
-      <artifactId>libthrift</artifactId>
-      <version>${libthrift.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>net.sf.supercsv</groupId>
-      <artifactId>super-csv</artifactId>
-      <version>${super-csv.version}</version>
-    </dependency>
-    <!-- test intra-project -->
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-exec</artifactId>
-      <version>${project.version}</version>
-      <classifier>tests</classifier>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
-      <artifactId>hive-service</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <!-- test inter-project -->
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapreduce-client-core</artifactId>
-      <version>${hadoop.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <version>${junit.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>postgresql</groupId>
-      <artifactId>postgresql</artifactId>
-      <version>9.1-901.jdbc4</version>
-      <scope>test</scope>
-    </dependency>
-  </dependencies>
-
-  <profiles>
-    <profile>
-      <id>sources</id>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-source-plugin</artifactId>
-            <executions>
-              <execution>
-                <id>attach-sources</id>
-                <goals>
-                  <goal>test-jar</goal>
-                </goals>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
-  </profiles>
-
-  <build>
-    <sourceDirectory>${basedir}/src/java</sourceDirectory>
-    <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-jar-plugin</artifactId>
-        <executions>
-          <execution>
-            <goals>
-              <goal>test-jar</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-
-</project>


[48/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
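
For context, a hypothetical sketch of the mechanism these files supported (illustrative Java only; the class and method names below are assumptions, not Hive's actual test-driver API): a .q test marked JAVA_VERSION_SPECIFIC_OUTPUT would be compared against a golden file keyed by the running JDK, falling back to the shared .q.out otherwise.

    import java.io.File;

    public class GoldenFileResolver {
        // Hypothetical resolver: prefer a JDK-specific golden file such as
        // list_bucket_dml_8.q.java1.8.out when one exists, otherwise fall
        // back to the shared .q.out. Names here are illustrative only.
        static File resolve(File resultsDir, String testName) {
            String major = System.getProperty("java.specification.version"); // e.g. "1.7", "1.8"
            File versioned = new File(resultsDir, testName + ".q.java" + major + ".out");
            return versioned.exists() ? versioned : new File(resultsDir, testName + ".q.out");
        }

        public static void main(String[] args) {
            System.out.println(resolve(new File("ql/src/test/results/clientpositive"),
                    "list_bucket_dml_8"));
        }
    }

With HIVE-13549 the version-specific variants are dropped and every JDK is expected to produce the single .q.out, which is why the JAVA_VERSION_SPECIFIC_OUTPUT marker disappears from the new golden file below.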
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
deleted file mode 100644
index 9947c1a..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.8.out
+++ /dev/null
@@ -1,712 +0,0 @@
-PREHOOK: query: -- list bucketing alter table ... concatenate: 
--- Use list bucketing DML to generate multiple files in partitions by turning off merge
--- dynamic partition. multiple skewed columns. merge.
--- The following explains the merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 000000_0
--- 155 000001_0
--- with merge
--- 254 000000_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 000000_0
--- 99 000001_0
--- with merge
--- 142 000001_0
--- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 000000_0
--- 5181 000001_0
--- with merge
--- 5181 000000_0
--- 5181 000001_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 000000_0
--- 87 000001_0
--- with merge
--- 118 000002_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing alter table ... concatenate: 
--- Use list bucketing DML to generate multiple files in partitions by turning off merge
--- dynamic partition. multiple skewed columns. merge.
--- The following explains the merge example used in this test case
--- DML will generate 2 partitions
--- ds=2008-04-08/hr=a1
--- ds=2008-04-08/hr=b1
--- without merge, each partition has more files
--- ds=2008-04-08/hr=a1 has 2 files
--- ds=2008-04-08/hr=b1 has 6 files
--- with merge each partition has fewer files
--- ds=2008-04-08/hr=a1 has 1 files
--- ds=2008-04-08/hr=b1 has 4 files
--- The following shows file size and name in each directory
--- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 155 000000_0
--- 155 000001_0
--- with merge
--- 254 000000_0
--- hr=b1/key=103/value=val_103:
--- without merge
--- 99 000000_0
--- 99 000001_0
--- with merge
--- 142 000001_0
--- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
--- without merge
--- 5181 000000_0
--- 5181 000001_0
--- with merge
--- 5181 000000_0
--- 5181 000001_0
--- hr=b1/key=484/value=val_484
--- without merge
--- 87 000000_0
--- 87 000001_0
--- with merge
--- 118 000002_0 
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_dynamic_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-         TOK_SELEXPR
-            TOK_FUNCTION
-               if
-               ==
-                  %
-                     TOK_TABLE_OR_COL
-                        key
-                     100
-                  0
-               'a1'
-               'b1'
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_dynamic_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                    name: default.list_bucketing_dynamic_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
-select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_dynamic_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-ds=2008-04-08/hr=a1
-ds=2008-04-08/hr=b1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, a1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	2                   
-	numRows             	16                  
-	rawDataSize         	136                 
-	totalSize           	310                 
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, b1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	6                   
-	numRows             	984                 
-	rawDataSize         	9488                
-	totalSize           	10734               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- concatenate the partition and it will merge files
-alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate
-PREHOOK: type: ALTER_PARTITION_MERGE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-POSTHOOK: query: -- concatenate the partition and it will merge files
-alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate
-POSTHOOK: type: ALTER_PARTITION_MERGE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, b1]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	3                   
-	numRows             	0                   
-	rawDataSize         	0                   
-	totalSize           	10586               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_dynamic_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_dynamic_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_dynamic_part
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            =
-               TOK_TABLE_OR_COL
-                  key
-               '484'
-            =
-               TOK_TABLE_OR_COL
-                  value
-               'val_484'
-
-
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr a1
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 2
-              numRows 16
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 136
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 310
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr b1
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 3
-              numRows 0
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 0
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10586
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_dynamic_part
-          Statistics: Num rows: 16 Data size: 136 Basic stats: PARTIAL Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 4 Data size: 34 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 4 Data size: 34 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
-#### A masked pattern was here ####
-484	val_484	2008-04-08	b1
-484	val_484	2008-04-08	b1
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' order by hr
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' order by hr
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- clean up
-drop table list_bucketing_dynamic_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_dynamic_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
new file mode 100644
index 0000000..ee36d3f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out
@@ -0,0 +1,639 @@
+PREHOOK: query: -- list bucketing alter table ... concatenate: 
+-- Use list bucketing DML to generate multiple files in partitions by turning off merge
+-- dynamic partition. multiple skewed columns. merge.
+-- The following explains the merge example used in this test case
+-- DML will generate 2 partitions
+-- ds=2008-04-08/hr=a1
+-- ds=2008-04-08/hr=b1
+-- without merge, each partition has more files
+-- ds=2008-04-08/hr=a1 has 2 files
+-- ds=2008-04-08/hr=b1 has 6 files
+-- with merge each partition has fewer files
+-- ds=2008-04-08/hr=a1 has 1 files
+-- ds=2008-04-08/hr=b1 has 4 files
+-- The following shows file size and name in each directory
+-- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 155 000000_0
+-- 155 000001_0
+-- with merge
+-- 254 000000_0
+-- hr=b1/key=103/value=val_103:
+-- without merge
+-- 99 000000_0
+-- 99 000001_0
+-- with merge
+-- 142 000001_0
+-- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 5181 000000_0
+-- 5181 000001_0
+-- with merge
+-- 5181 000000_0
+-- 5181 000001_0
+-- hr=b1/key=484/value=val_484
+-- without merge
+-- 87 000000_0
+-- 87 000001_0
+-- with merge
+-- 118 000002_0 
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- list bucketing alter table ... concatenate: 
+-- Use list bucketing DML to generate multiple files in partitions by turning off merge
+-- dynamic partition. multiple skewed columns. merge.
+-- The following explains the merge example used in this test case
+-- DML will generate 2 partitions
+-- ds=2008-04-08/hr=a1
+-- ds=2008-04-08/hr=b1
+-- without merge, each partition has more files
+-- ds=2008-04-08/hr=a1 has 2 files
+-- ds=2008-04-08/hr=b1 has 6 files
+-- with merge each partition has fewer files
+-- ds=2008-04-08/hr=a1 has 1 files
+-- ds=2008-04-08/hr=b1 has 4 files
+-- The following shows file size and name in each directory
+-- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 155 000000_0
+-- 155 000001_0
+-- with merge
+-- 254 000000_0
+-- hr=b1/key=103/value=val_103:
+-- without merge
+-- 99 000000_0
+-- 99 000001_0
+-- with merge
+-- 142 000001_0
+-- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+-- without merge
+-- 5181 000000_0
+-- 5181 000001_0
+-- with merge
+-- 5181 000000_0
+-- 5181 000001_0
+-- hr=b1/key=484/value=val_484
+-- without merge
+-- 87 000000_0
+-- 87 000001_0
+-- with merge
+-- 118 000002_0 
+
+-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+-- create a skewed table
+create table list_bucketing_dynamic_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_dynamic_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketing to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string), if(((UDFToDouble(key) % 100.0) = 0.0), 'a1', 'b1') (type: string)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_dynamic_part
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_dynamic_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
+POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr)
+select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_dynamic_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_dynamic_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+ds=2008-04-08/hr=a1
+ds=2008-04-08/hr=b1
+PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='a1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, a1]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	2                   
+	numRows             	16                  
+	rawDataSize         	136                 
+	totalSize           	310                 
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, b1]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	6                   
+	numRows             	984                 
+	rawDataSize         	9488                
+	totalSize           	10734               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- concatenate the partition and it will merge files
+alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate
+PREHOOK: type: ALTER_PARTITION_MERGE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+POSTHOOK: query: -- concatenate the partition and it will merge files
+alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate
+POSTHOOK: type: ALTER_PARTITION_MERGE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, b1]    	 
+Database:           	default             	 
+Table:              	list_bucketing_dynamic_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	numFiles            	3                   
+	numRows             	984                 
+	rawDataSize         	9488                
+	totalSize           	10586               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select count(*) from list_bucketing_dynamic_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from list_bucketing_dynamic_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+#### A masked pattern was here ####
+1000
+PREHOOK: query: explain extended
+select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr a1
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_dynamic_part
+              numFiles 2
+              numRows 16
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 136
+              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 310
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+            name: default.list_bucketing_dynamic_part
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr b1
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_dynamic_part
+              numFiles 3
+              numRows 984
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 9488
+              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 10586
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_dynamic_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_dynamic_part
+            name: default.list_bucketing_dynamic_part
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_dynamic_part
+          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: '484' (type: string), 'val_484' (type: string), ds (type: string), hr (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_dynamic_part where key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=a1
+POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1
+#### A masked pattern was here ####
+484	val_484	2008-04-08	b1
+484	val_484	2008-04-08	b1
+PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' order by hr
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484' order by hr
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+484	val_484	2008-04-08	11
+484	val_484	2008-04-08	12
+PREHOOK: query: -- clean up
+drop table list_bucketing_dynamic_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_dynamic_part
+PREHOOK: Output: default@list_bucketing_dynamic_part
+POSTHOOK: query: -- clean up
+drop table list_bucketing_dynamic_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_dynamic_part
+POSTHOOK: Output: default@list_bucketing_dynamic_part
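
The pair of "desc formatted" outputs above shows the two observable effects of concatenation on this list-bucketed RCFile partition: numFiles drops from 6 to 3 and totalSize from 10734 to 10586, while numRows (984) and rawDataSize (9488) are unchanged; the COLUMN_STATS_ACCURATE parameter is dropped as well. A minimal HiveQL sketch of the shape this test exercises, reconstructed from the describe output above (an illustration, not text from the commit):

  -- Skewed (key, value) pairs get their own subdirectories, matching
  -- "Stored As SubDirectories: Yes" and the skew list in the output above.
  CREATE TABLE list_bucketing_dynamic_part (key STRING, value STRING)
  PARTITIONED BY (ds STRING, hr STRING)
  SKEWED BY (key, value) ON (('484','val_484'), ('51','val_14'), ('103','val_103'))
  STORED AS DIRECTORIES
  STORED AS RCFILE;

  -- Merge the small files of one partition in place; row counts are untouched.
  ALTER TABLE list_bucketing_dynamic_part
    PARTITION (ds = '2008-04-08', hr = 'b1') CONCATENATE;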


[58/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/char_udf1.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/char_udf1.q.java1.8.out b/ql/src/test/results/clientpositive/char_udf1.q.java1.8.out
deleted file mode 100644
index 5691a06..0000000
--- a/ql/src/test/results/clientpositive/char_udf1.q.java1.8.out
+++ /dev/null
@@ -1,457 +0,0 @@
-PREHOOK: query: drop table char_udf_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table char_udf_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20))
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20))
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@char_udf_1
-PREHOOK: query: insert overwrite table char_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: insert overwrite table char_udf_1
-  select key, value, key, value from src where key = '238' limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@char_udf_1
-POSTHOOK: Lineage: char_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: char_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with char support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- UDFs with char support
-select 
-  concat(c1, c2),
-  concat(c3, c4),
-  concat(c1, c2) = concat(c3, c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238val_238	238val_238                    	true
-PREHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  upper(c2),
-  upper(c4),
-  upper(c2) = upper(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-VAL_238	VAL_238             	true
-PREHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  lower(c2),
-  lower(c4),
-  lower(c2) = lower(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238             	true
-PREHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Scalar UDFs
-select
-  ascii(c2),
-  ascii(c4),
-  ascii(c2) = ascii(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-118	118	true
-PREHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select 
-  concat_ws('|', c1, c2),
-  concat_ws('|', c3, c4),
-  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238|val_238	238|val_238	true
-PREHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
-  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  instr(c2, '_'),
-  instr(c4, '_'),
-  instr(c2, '_') = instr(c4, '_')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  instr(c2, '_'),
-  instr(c4, '_'),
-  instr(c2, '_') = instr(c4, '_')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-4	4	true
-PREHOOK: query: select
-  length(c2),
-  length(c4),
-  length(c2) = length(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  length(c2),
-  length(c4),
-  length(c2) = length(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-7	7	true
-PREHOOK: query: select
-  locate('a', 'abcdabcd', 3),
-  locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
-  locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  locate('a', 'abcdabcd', 3),
-  locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
-  locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-5	5	true
-PREHOOK: query: select
-  lpad(c2, 15, ' '),
-  lpad(c4, 15, ' '),
-  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  lpad(c2, 15, ' '),
-  lpad(c4, 15, ' '),
-  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-        val_238	        val_238	true
-PREHOOK: query: select
-  ltrim(c2),
-  ltrim(c4),
-  ltrim(c2) = ltrim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  ltrim(c2),
-  ltrim(c4),
-  ltrim(c2) = ltrim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  regexp(c2, 'val'),
-  regexp(c4, 'val'),
-  regexp(c2, 'val') = regexp(c4, 'val')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp(c2, 'val'),
-  regexp(c4, 'val'),
-  regexp(c2, 'val') = regexp(c4, 'val')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-true	true	true
-PREHOOK: query: select
-  regexp_extract(c2, 'val_([0-9]+)', 1),
-  regexp_extract(c4, 'val_([0-9]+)', 1),
-  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp_extract(c2, 'val_([0-9]+)', 1),
-  regexp_extract(c4, 'val_([0-9]+)', 1),
-  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-238	238	true
-PREHOOK: query: select
-  regexp_replace(c2, 'val', 'replaced'),
-  regexp_replace(c4, 'val', 'replaced'),
-  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  regexp_replace(c2, 'val', 'replaced'),
-  regexp_replace(c4, 'val', 'replaced'),
-  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-replaced_238	replaced_238	true
-PREHOOK: query: select
-  reverse(c2),
-  reverse(c4),
-  reverse(c2) = reverse(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  reverse(c2),
-  reverse(c4),
-  reverse(c2) = reverse(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-832_lav	832_lav	true
-PREHOOK: query: select
-  rpad(c2, 15, ' '),
-  rpad(c4, 15, ' '),
-  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  rpad(c2, 15, ' '),
-  rpad(c4, 15, ' '),
-  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238        	val_238        	true
-PREHOOK: query: select
-  rtrim(c2),
-  rtrim(c4),
-  rtrim(c2) = rtrim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  rtrim(c2),
-  rtrim(c4),
-  rtrim(c2) = rtrim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: select
-  sentences('See spot run.  See jane run.'),
-  sentences(cast('See spot run.  See jane run.' as char(50)))
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  sentences('See spot run.  See jane run.'),
-  sentences(cast('See spot run.  See jane run.' as char(50)))
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-[["See","spot","run"],["See","jane","run"]]	[["See","spot","run"],["See","jane","run"]]
-PREHOOK: query: select
-  split(c2, '_'),
-  split(c4, '_')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  split(c2, '_'),
-  split(c4, '_')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-["val","238"]	["val","238"]
-PREHOOK: query: select 
-  str_to_map('a:1,b:2,c:3',',',':'),
-  str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select 
-  str_to_map('a:1,b:2,c:3',',',':'),
-  str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-{"a":"1","b":"2","c":"3"}	{"a":"1","b":"2","c":"3"}
-PREHOOK: query: select
-  substr(c2, 1, 3),
-  substr(c4, 1, 3),
-  substr(c2, 1, 3) = substr(c4, 1, 3)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  substr(c2, 1, 3),
-  substr(c4, 1, 3),
-  substr(c2, 1, 3) = substr(c4, 1, 3)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val	val	true
-PREHOOK: query: select
-  trim(c2),
-  trim(c4),
-  trim(c2) = trim(c4)
-from char_udf_1 limit 1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  trim(c2),
-  trim(c4),
-  trim(c2) = trim(c4)
-from char_udf_1 limit 1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238	true
-PREHOOK: query: -- Aggregate Functions
-select
-  compute_stats(c2, 16),
-  compute_stats(c4, 16)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: -- Aggregate Functions
-select
-  compute_stats(c2, 16),
-  compute_stats(c4, 16)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1}	{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1}
-PREHOOK: query: select
-  min(c2),
-  min(c4)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  min(c2),
-  min(c4)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238             
-PREHOOK: query: select
-  max(c2),
-  max(c4)
-from char_udf_1
-PREHOOK: type: QUERY
-PREHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-POSTHOOK: query: select
-  max(c2),
-  max(c4)
-from char_udf_1
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@char_udf_1
-#### A masked pattern was here ####
-val_238	val_238             
-PREHOOK: query: drop table char_udf_1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@char_udf_1
-PREHOOK: Output: default@char_udf_1
-POSTHOOK: query: drop table char_udf_1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@char_udf_1
-POSTHOOK: Output: default@char_udf_1

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/char_udf1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/char_udf1.q.out b/ql/src/test/results/clientpositive/char_udf1.q.out
new file mode 100644
index 0000000..d84237a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/char_udf1.q.out
@@ -0,0 +1,459 @@
+PREHOOK: query: drop table char_udf_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table char_udf_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@char_udf_1
+POSTHOOK: query: create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@char_udf_1
+PREHOOK: query: insert overwrite table char_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@char_udf_1
+POSTHOOK: query: insert overwrite table char_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@char_udf_1
+POSTHOOK: Lineage: char_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: char_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: char_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: char_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- UDFs with char support
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- UDFs with char support
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+238val_238	238val_238                    	true
+PREHOOK: query: select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+VAL_238	VAL_238             	true
+PREHOOK: query: select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val_238	val_238             	true
+PREHOOK: query: -- Scalar UDFs
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Scalar UDFs
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+118	118	true
+PREHOOK: query: select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+238|val_238	238|val_238	true
+PREHOOK: query: select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+4	4	true
+PREHOOK: query: select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+7	7	true
+PREHOOK: query: select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as char(1)), cast('abcdabcd' as char(10)), 3)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+5	5	true
+PREHOOK: query: select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+        val_238	        val_238	true
+PREHOOK: query: select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: -- In hive wiki page https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF
+-- we only allow A regexp B, not regexp (A,B).
+
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- In hive wiki page https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF
+-- we only allow A regexp B, not regexp (A,B).
+
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+true	true	true
+PREHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+238	238	true
+PREHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+replaced_238	replaced_238	true
+PREHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+832_lav	832_lav	true
+PREHOOK: query: select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val_238        	val_238        	true
+PREHOOK: query: select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as char(50)))
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as char(50)))
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+[["See","spot","run"],["See","jane","run"]]	[["See","spot","run"],["See","jane","run"]]
+PREHOOK: query: select
+  split(c2, '_'),
+  split(c4, '_')
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  split(c2, '_'),
+  split(c4, '_')
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+["val","238"]	["val","238"]
+PREHOOK: query: select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as char(20)),',',':')
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+{"a":"1","b":"2","c":"3"}	{"a":"1","b":"2","c":"3"}
+PREHOOK: query: select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val	val	true
+PREHOOK: query: select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from char_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from char_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: -- Aggregate Functions
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from char_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: -- Aggregate Functions
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from char_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}	{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}
+PREHOOK: query: select
+  min(c2),
+  min(c4)
+from char_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  min(c2),
+  min(c4)
+from char_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val_238	val_238             
+PREHOOK: query: select
+  max(c2),
+  max(c4)
+from char_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  max(c2),
+  max(c4)
+from char_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@char_udf_1
+#### A masked pattern was here ####
+val_238	val_238             
+PREHOOK: query: drop table char_udf_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@char_udf_1
+PREHOOK: Output: default@char_udf_1
+POSTHOOK: query: drop table char_udf_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@char_udf_1
+POSTHOOK: Output: default@char_udf_1
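
The char_udf1 golden output above pins down the char(n) semantics these UDFs must preserve: values come back blank-padded to n (note the trailing spaces after 238val_238 and val_238), length() does not count the pad, and equality between a char and an unpadded string ignores trailing pad spaces, which is why every comparison column prints true. A minimal sketch consistent with that output, reusing the test's literal; the query shape is an editor's illustration, not part of the golden file:

  SELECT
    cast('val_238' as char(20)),             -- blank-padded to 20 on output
    length(cast('val_238' as char(20))),     -- 7: pad spaces are not counted
    cast('val_238' as char(20)) = 'val_238'  -- true: trailing pad is ignored
  FROM char_udf_1 LIMIT 1;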

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/input4.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input4.q.java1.7.out b/ql/src/test/results/clientpositive/input4.q.java1.7.out
deleted file mode 100644
index eaeedcb..0000000
--- a/ql/src/test/results/clientpositive/input4.q.java1.7.out
+++ /dev/null
@@ -1,559 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@INPUT4
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@INPUT4
-PREHOOK: query: EXPLAIN
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
-PREHOOK: type: LOAD
-POSTHOOK: query: EXPLAIN
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
-POSTHOOK: type: LOAD
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-1 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: false
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.input4
-
-  Stage: Stage-1
-    Stats-Aggr Operator
-
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@input4
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@input4
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
-POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"ListSink":{}}}}}}}}}}
-PREHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
-PREHOOK: type: QUERY
-PREHOOK: Input: default@input4
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@input4
-#### A masked pattern was here ####
-val_238	238
-val_86	86
-val_311	311
-val_27	27
-val_165	165
-val_409	409
-val_255	255
-val_278	278
-val_98	98
-val_484	484
-val_265	265
-val_193	193
-val_401	401
-val_150	150
-val_273	273
-val_224	224
-val_369	369
-val_66	66
-val_128	128
-val_213	213
-val_146	146
-val_406	406
-val_429	429
-val_374	374
-val_152	152
-val_469	469
-val_145	145
-val_495	495
-val_37	37
-val_327	327
-val_281	281
-val_277	277
-val_209	209
-val_15	15
-val_82	82
-val_403	403
-val_166	166
-val_417	417
-val_430	430
-val_252	252
-val_292	292
-val_219	219
-val_287	287
-val_153	153
-val_193	193
-val_338	338
-val_446	446
-val_459	459
-val_394	394
-val_237	237
-val_482	482
-val_174	174
-val_413	413
-val_494	494
-val_207	207
-val_199	199
-val_466	466
-val_208	208
-val_174	174
-val_399	399
-val_396	396
-val_247	247
-val_417	417
-val_489	489
-val_162	162
-val_377	377
-val_397	397
-val_309	309
-val_365	365
-val_266	266
-val_439	439
-val_342	342
-val_367	367
-val_325	325
-val_167	167
-val_195	195
-val_475	475
-val_17	17
-val_113	113
-val_155	155
-val_203	203
-val_339	339
-val_0	0
-val_455	455
-val_128	128
-val_311	311
-val_316	316
-val_57	57
-val_302	302
-val_205	205
-val_149	149
-val_438	438
-val_345	345
-val_129	129
-val_170	170
-val_20	20
-val_489	489
-val_157	157
-val_378	378
-val_221	221
-val_92	92
-val_111	111
-val_47	47
-val_72	72
-val_4	4
-val_280	280
-val_35	35
-val_427	427
-val_277	277
-val_208	208
-val_356	356
-val_399	399
-val_169	169
-val_382	382
-val_498	498
-val_125	125
-val_386	386
-val_437	437
-val_469	469
-val_192	192
-val_286	286
-val_187	187
-val_176	176
-val_54	54
-val_459	459
-val_51	51
-val_138	138
-val_103	103
-val_239	239
-val_213	213
-val_216	216
-val_430	430
-val_278	278
-val_176	176
-val_289	289
-val_221	221
-val_65	65
-val_318	318
-val_332	332
-val_311	311
-val_275	275
-val_137	137
-val_241	241
-val_83	83
-val_333	333
-val_180	180
-val_284	284
-val_12	12
-val_230	230
-val_181	181
-val_67	67
-val_260	260
-val_404	404
-val_384	384
-val_489	489
-val_353	353
-val_373	373
-val_272	272
-val_138	138
-val_217	217
-val_84	84
-val_348	348
-val_466	466
-val_58	58
-val_8	8
-val_411	411
-val_230	230
-val_208	208
-val_348	348
-val_24	24
-val_463	463
-val_431	431
-val_179	179
-val_172	172
-val_42	42
-val_129	129
-val_158	158
-val_119	119
-val_496	496
-val_0	0
-val_322	322
-val_197	197
-val_468	468
-val_393	393
-val_454	454
-val_100	100
-val_298	298
-val_199	199
-val_191	191
-val_418	418
-val_96	96
-val_26	26
-val_165	165
-val_327	327
-val_230	230
-val_205	205
-val_120	120
-val_131	131
-val_51	51
-val_404	404
-val_43	43
-val_436	436
-val_156	156
-val_469	469
-val_468	468
-val_308	308
-val_95	95
-val_196	196
-val_288	288
-val_481	481
-val_457	457
-val_98	98
-val_282	282
-val_197	197
-val_187	187
-val_318	318
-val_318	318
-val_409	409
-val_470	470
-val_137	137
-val_369	369
-val_316	316
-val_169	169
-val_413	413
-val_85	85
-val_77	77
-val_0	0
-val_490	490
-val_87	87
-val_364	364
-val_179	179
-val_118	118
-val_134	134
-val_395	395
-val_282	282
-val_138	138
-val_238	238
-val_419	419
-val_15	15
-val_118	118
-val_72	72
-val_90	90
-val_307	307
-val_19	19
-val_435	435
-val_10	10
-val_277	277
-val_273	273
-val_306	306
-val_224	224
-val_309	309
-val_389	389
-val_327	327
-val_242	242
-val_369	369
-val_392	392
-val_272	272
-val_331	331
-val_401	401
-val_242	242
-val_452	452
-val_177	177
-val_226	226
-val_5	5
-val_497	497
-val_402	402
-val_396	396
-val_317	317
-val_395	395
-val_58	58
-val_35	35
-val_336	336
-val_95	95
-val_11	11
-val_168	168
-val_34	34
-val_229	229
-val_233	233
-val_143	143
-val_472	472
-val_322	322
-val_498	498
-val_160	160
-val_195	195
-val_42	42
-val_321	321
-val_430	430
-val_119	119
-val_489	489
-val_458	458
-val_78	78
-val_76	76
-val_41	41
-val_223	223
-val_492	492
-val_149	149
-val_449	449
-val_218	218
-val_228	228
-val_138	138
-val_453	453
-val_30	30
-val_209	209
-val_64	64
-val_468	468
-val_76	76
-val_74	74
-val_342	342
-val_69	69
-val_230	230
-val_33	33
-val_368	368
-val_103	103
-val_296	296
-val_113	113
-val_216	216
-val_367	367
-val_344	344
-val_167	167
-val_274	274
-val_219	219
-val_239	239
-val_485	485
-val_116	116
-val_223	223
-val_256	256
-val_263	263
-val_70	70
-val_487	487
-val_480	480
-val_401	401
-val_288	288
-val_191	191
-val_5	5
-val_244	244
-val_438	438
-val_128	128
-val_467	467
-val_432	432
-val_202	202
-val_316	316
-val_229	229
-val_469	469
-val_463	463
-val_280	280
-val_2	2
-val_35	35
-val_283	283
-val_331	331
-val_235	235
-val_80	80
-val_44	44
-val_193	193
-val_321	321
-val_335	335
-val_104	104
-val_466	466
-val_366	366
-val_175	175
-val_403	403
-val_483	483
-val_53	53
-val_105	105
-val_257	257
-val_406	406
-val_409	409
-val_190	190
-val_406	406
-val_401	401
-val_114	114
-val_258	258
-val_90	90
-val_203	203
-val_262	262
-val_348	348
-val_424	424
-val_12	12
-val_396	396
-val_201	201
-val_217	217
-val_164	164
-val_431	431
-val_454	454
-val_478	478
-val_298	298
-val_125	125
-val_431	431
-val_164	164
-val_424	424
-val_187	187
-val_382	382
-val_5	5
-val_70	70
-val_397	397
-val_480	480
-val_291	291
-val_24	24
-val_351	351
-val_255	255
-val_104	104
-val_70	70
-val_163	163
-val_438	438
-val_119	119
-val_414	414
-val_200	200
-val_491	491
-val_237	237
-val_439	439
-val_360	360
-val_248	248
-val_479	479
-val_305	305
-val_417	417
-val_199	199
-val_444	444
-val_120	120
-val_429	429
-val_169	169
-val_443	443
-val_323	323
-val_325	325
-val_277	277
-val_230	230
-val_478	478
-val_178	178
-val_468	468
-val_310	310
-val_317	317
-val_333	333
-val_493	493
-val_460	460
-val_207	207
-val_249	249
-val_265	265
-val_480	480
-val_83	83
-val_136	136
-val_353	353
-val_172	172
-val_214	214
-val_462	462
-val_233	233
-val_406	406
-val_133	133
-val_175	175
-val_189	189
-val_454	454
-val_375	375
-val_401	401
-val_421	421
-val_407	407
-val_384	384
-val_256	256
-val_26	26
-val_134	134
-val_67	67
-val_384	384
-val_379	379
-val_18	18
-val_462	462
-val_492	492
-val_100	100
-val_298	298
-val_9	9
-val_341	341
-val_498	498
-val_146	146
-val_458	458
-val_362	362
-val_186	186
-val_285	285
-val_348	348
-val_167	167
-val_18	18
-val_273	273
-val_183	183
-val_281	281
-val_344	344
-val_97	97
-val_469	469
-val_315	315
-val_84	84
-val_28	28
-val_37	37
-val_448	448
-val_152	152
-val_348	348
-val_307	307
-val_194	194
-val_414	414
-val_477	477
-val_222	222
-val_126	126
-val_90	90
-val_169	169
-val_403	403
-val_400	400
-val_200	200
-val_97	97
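
The jdk-specific input4 outputs removed by this commit are byte-identical — this java1.7 file and the java1.8 copy that follows both carry the diff header "index eaeedcb..0000000" — which is what lets the single input4.q.out added further below replace them. The output also illustrates that EXPLAIN FORMATTED serializes the whole plan as one line of JSON; the same SELECT plan, re-indented here purely for readability (content unchanged):

  {"STAGE DEPENDENCIES": {"Stage-0": {"ROOT STAGE": "TRUE"}},
   "STAGE PLANS": {"Stage-0": {"Fetch Operator": {
       "limit:": "-1",
       "Processor Tree:": {"TableScan": {
           "alias:": "input4alias",
           "Statistics:": "Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE",
           "children": {"Select Operator": {
               "expressions:": "value (type: string), key (type: string)",
               "outputColumnNames:": ["_col0", "_col1"],
               "Statistics:": "Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE",
               "children": {"ListSink": {}}}}}}}}}}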

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/input4.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input4.q.java1.8.out b/ql/src/test/results/clientpositive/input4.q.java1.8.out
deleted file mode 100644
index eaeedcb..0000000
--- a/ql/src/test/results/clientpositive/input4.q.java1.8.out
+++ /dev/null
@@ -1,559 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@INPUT4
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@INPUT4
-PREHOOK: query: EXPLAIN
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
-PREHOOK: type: LOAD
-POSTHOOK: query: EXPLAIN
-LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
-POSTHOOK: type: LOAD
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-  Stage-1 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: false
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.input4
-
-  Stage: Stage-1
-    Stats-Aggr Operator
-
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@input4
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@input4
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
-POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"ListSink":{}}}}}}}}}}
-PREHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
-PREHOOK: type: QUERY
-PREHOOK: Input: default@input4
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@input4
-#### A masked pattern was here ####
-val_238	238
-val_86	86
-val_311	311
-val_27	27
-val_165	165
-val_409	409
-val_255	255
-val_278	278
-val_98	98
-val_484	484
-val_265	265
-val_193	193
-val_401	401
-val_150	150
-val_273	273
-val_224	224
-val_369	369
-val_66	66
-val_128	128
-val_213	213
-val_146	146
-val_406	406
-val_429	429
-val_374	374
-val_152	152
-val_469	469
-val_145	145
-val_495	495
-val_37	37
-val_327	327
-val_281	281
-val_277	277
-val_209	209
-val_15	15
-val_82	82
-val_403	403
-val_166	166
-val_417	417
-val_430	430
-val_252	252
-val_292	292
-val_219	219
-val_287	287
-val_153	153
-val_193	193
-val_338	338
-val_446	446
-val_459	459
-val_394	394
-val_237	237
-val_482	482
-val_174	174
-val_413	413
-val_494	494
-val_207	207
-val_199	199
-val_466	466
-val_208	208
-val_174	174
-val_399	399
-val_396	396
-val_247	247
-val_417	417
-val_489	489
-val_162	162
-val_377	377
-val_397	397
-val_309	309
-val_365	365
-val_266	266
-val_439	439
-val_342	342
-val_367	367
-val_325	325
-val_167	167
-val_195	195
-val_475	475
-val_17	17
-val_113	113
-val_155	155
-val_203	203
-val_339	339
-val_0	0
-val_455	455
-val_128	128
-val_311	311
-val_316	316
-val_57	57
-val_302	302
-val_205	205
-val_149	149
-val_438	438
-val_345	345
-val_129	129
-val_170	170
-val_20	20
-val_489	489
-val_157	157
-val_378	378
-val_221	221
-val_92	92
-val_111	111
-val_47	47
-val_72	72
-val_4	4
-val_280	280
-val_35	35
-val_427	427
-val_277	277
-val_208	208
-val_356	356
-val_399	399
-val_169	169
-val_382	382
-val_498	498
-val_125	125
-val_386	386
-val_437	437
-val_469	469
-val_192	192
-val_286	286
-val_187	187
-val_176	176
-val_54	54
-val_459	459
-val_51	51
-val_138	138
-val_103	103
-val_239	239
-val_213	213
-val_216	216
-val_430	430
-val_278	278
-val_176	176
-val_289	289
-val_221	221
-val_65	65
-val_318	318
-val_332	332
-val_311	311
-val_275	275
-val_137	137
-val_241	241
-val_83	83
-val_333	333
-val_180	180
-val_284	284
-val_12	12
-val_230	230
-val_181	181
-val_67	67
-val_260	260
-val_404	404
-val_384	384
-val_489	489
-val_353	353
-val_373	373
-val_272	272
-val_138	138
-val_217	217
-val_84	84
-val_348	348
-val_466	466
-val_58	58
-val_8	8
-val_411	411
-val_230	230
-val_208	208
-val_348	348
-val_24	24
-val_463	463
-val_431	431
-val_179	179
-val_172	172
-val_42	42
-val_129	129
-val_158	158
-val_119	119
-val_496	496
-val_0	0
-val_322	322
-val_197	197
-val_468	468
-val_393	393
-val_454	454
-val_100	100
-val_298	298
-val_199	199
-val_191	191
-val_418	418
-val_96	96
-val_26	26
-val_165	165
-val_327	327
-val_230	230
-val_205	205
-val_120	120
-val_131	131
-val_51	51
-val_404	404
-val_43	43
-val_436	436
-val_156	156
-val_469	469
-val_468	468
-val_308	308
-val_95	95
-val_196	196
-val_288	288
-val_481	481
-val_457	457
-val_98	98
-val_282	282
-val_197	197
-val_187	187
-val_318	318
-val_318	318
-val_409	409
-val_470	470
-val_137	137
-val_369	369
-val_316	316
-val_169	169
-val_413	413
-val_85	85
-val_77	77
-val_0	0
-val_490	490
-val_87	87
-val_364	364
-val_179	179
-val_118	118
-val_134	134
-val_395	395
-val_282	282
-val_138	138
-val_238	238
-val_419	419
-val_15	15
-val_118	118
-val_72	72
-val_90	90
-val_307	307
-val_19	19
-val_435	435
-val_10	10
-val_277	277
-val_273	273
-val_306	306
-val_224	224
-val_309	309
-val_389	389
-val_327	327
-val_242	242
-val_369	369
-val_392	392
-val_272	272
-val_331	331
-val_401	401
-val_242	242
-val_452	452
-val_177	177
-val_226	226
-val_5	5
-val_497	497
-val_402	402
-val_396	396
-val_317	317
-val_395	395
-val_58	58
-val_35	35
-val_336	336
-val_95	95
-val_11	11
-val_168	168
-val_34	34
-val_229	229
-val_233	233
-val_143	143
-val_472	472
-val_322	322
-val_498	498
-val_160	160
-val_195	195
-val_42	42
-val_321	321
-val_430	430
-val_119	119
-val_489	489
-val_458	458
-val_78	78
-val_76	76
-val_41	41
-val_223	223
-val_492	492
-val_149	149
-val_449	449
-val_218	218
-val_228	228
-val_138	138
-val_453	453
-val_30	30
-val_209	209
-val_64	64
-val_468	468
-val_76	76
-val_74	74
-val_342	342
-val_69	69
-val_230	230
-val_33	33
-val_368	368
-val_103	103
-val_296	296
-val_113	113
-val_216	216
-val_367	367
-val_344	344
-val_167	167
-val_274	274
-val_219	219
-val_239	239
-val_485	485
-val_116	116
-val_223	223
-val_256	256
-val_263	263
-val_70	70
-val_487	487
-val_480	480
-val_401	401
-val_288	288
-val_191	191
-val_5	5
-val_244	244
-val_438	438
-val_128	128
-val_467	467
-val_432	432
-val_202	202
-val_316	316
-val_229	229
-val_469	469
-val_463	463
-val_280	280
-val_2	2
-val_35	35
-val_283	283
-val_331	331
-val_235	235
-val_80	80
-val_44	44
-val_193	193
-val_321	321
-val_335	335
-val_104	104
-val_466	466
-val_366	366
-val_175	175
-val_403	403
-val_483	483
-val_53	53
-val_105	105
-val_257	257
-val_406	406
-val_409	409
-val_190	190
-val_406	406
-val_401	401
-val_114	114
-val_258	258
-val_90	90
-val_203	203
-val_262	262
-val_348	348
-val_424	424
-val_12	12
-val_396	396
-val_201	201
-val_217	217
-val_164	164
-val_431	431
-val_454	454
-val_478	478
-val_298	298
-val_125	125
-val_431	431
-val_164	164
-val_424	424
-val_187	187
-val_382	382
-val_5	5
-val_70	70
-val_397	397
-val_480	480
-val_291	291
-val_24	24
-val_351	351
-val_255	255
-val_104	104
-val_70	70
-val_163	163
-val_438	438
-val_119	119
-val_414	414
-val_200	200
-val_491	491
-val_237	237
-val_439	439
-val_360	360
-val_248	248
-val_479	479
-val_305	305
-val_417	417
-val_199	199
-val_444	444
-val_120	120
-val_429	429
-val_169	169
-val_443	443
-val_323	323
-val_325	325
-val_277	277
-val_230	230
-val_478	478
-val_178	178
-val_468	468
-val_310	310
-val_317	317
-val_333	333
-val_493	493
-val_460	460
-val_207	207
-val_249	249
-val_265	265
-val_480	480
-val_83	83
-val_136	136
-val_353	353
-val_172	172
-val_214	214
-val_462	462
-val_233	233
-val_406	406
-val_133	133
-val_175	175
-val_189	189
-val_454	454
-val_375	375
-val_401	401
-val_421	421
-val_407	407
-val_384	384
-val_256	256
-val_26	26
-val_134	134
-val_67	67
-val_384	384
-val_379	379
-val_18	18
-val_462	462
-val_492	492
-val_100	100
-val_298	298
-val_9	9
-val_341	341
-val_498	498
-val_146	146
-val_458	458
-val_362	362
-val_186	186
-val_285	285
-val_348	348
-val_167	167
-val_18	18
-val_273	273
-val_183	183
-val_281	281
-val_344	344
-val_97	97
-val_469	469
-val_315	315
-val_84	84
-val_28	28
-val_37	37
-val_448	448
-val_152	152
-val_348	348
-val_307	307
-val_194	194
-val_414	414
-val_477	477
-val_222	222
-val_126	126
-val_90	90
-val_169	169
-val_403	403
-val_400	400
-val_200	200
-val_97	97

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/input4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input4.q.out b/ql/src/test/results/clientpositive/input4.q.out
new file mode 100644
index 0000000..83912f6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/input4.q.out
@@ -0,0 +1,555 @@
+PREHOOK: query: CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@INPUT4
+POSTHOOK: query: CREATE TABLE INPUT4(KEY STRING, VALUE STRING) STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@INPUT4
+PREHOOK: query: EXPLAIN
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
+PREHOOK: type: LOAD
+POSTHOOK: query: EXPLAIN
+LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
+POSTHOOK: type: LOAD
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+  Stage-1 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.input4
+
+  Stage: Stage-1
+    Stats-Aggr Operator
+
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@input4
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE INPUT4
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@input4
+PREHOOK: query: EXPLAIN FORMATTED
+SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FORMATTED
+SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+POSTHOOK: type: QUERY
+{"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","children":{"ListSink":{}}}}}}}}}}
+PREHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+PREHOOK: type: QUERY
+PREHOOK: Input: default@input4
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@input4
+#### A masked pattern was here ####
+val_238	238
+val_86	86
+val_311	311
+val_27	27
+val_165	165
+val_409	409
+val_255	255
+val_278	278
+val_98	98
+val_484	484
+val_265	265
+val_193	193
+val_401	401
+val_150	150
+val_273	273
+val_224	224
+val_369	369
+val_66	66
+val_128	128
+val_213	213
+val_146	146
+val_406	406
+val_429	429
+val_374	374
+val_152	152
+val_469	469
+val_145	145
+val_495	495
+val_37	37
+val_327	327
+val_281	281
+val_277	277
+val_209	209
+val_15	15
+val_82	82
+val_403	403
+val_166	166
+val_417	417
+val_430	430
+val_252	252
+val_292	292
+val_219	219
+val_287	287
+val_153	153
+val_193	193
+val_338	338
+val_446	446
+val_459	459
+val_394	394
+val_237	237
+val_482	482
+val_174	174
+val_413	413
+val_494	494
+val_207	207
+val_199	199
+val_466	466
+val_208	208
+val_174	174
+val_399	399
+val_396	396
+val_247	247
+val_417	417
+val_489	489
+val_162	162
+val_377	377
+val_397	397
+val_309	309
+val_365	365
+val_266	266
+val_439	439
+val_342	342
+val_367	367
+val_325	325
+val_167	167
+val_195	195
+val_475	475
+val_17	17
+val_113	113
+val_155	155
+val_203	203
+val_339	339
+val_0	0
+val_455	455
+val_128	128
+val_311	311
+val_316	316
+val_57	57
+val_302	302
+val_205	205
+val_149	149
+val_438	438
+val_345	345
+val_129	129
+val_170	170
+val_20	20
+val_489	489
+val_157	157
+val_378	378
+val_221	221
+val_92	92
+val_111	111
+val_47	47
+val_72	72
+val_4	4
+val_280	280
+val_35	35
+val_427	427
+val_277	277
+val_208	208
+val_356	356
+val_399	399
+val_169	169
+val_382	382
+val_498	498
+val_125	125
+val_386	386
+val_437	437
+val_469	469
+val_192	192
+val_286	286
+val_187	187
+val_176	176
+val_54	54
+val_459	459
+val_51	51
+val_138	138
+val_103	103
+val_239	239
+val_213	213
+val_216	216
+val_430	430
+val_278	278
+val_176	176
+val_289	289
+val_221	221
+val_65	65
+val_318	318
+val_332	332
+val_311	311
+val_275	275
+val_137	137
+val_241	241
+val_83	83
+val_333	333
+val_180	180
+val_284	284
+val_12	12
+val_230	230
+val_181	181
+val_67	67
+val_260	260
+val_404	404
+val_384	384
+val_489	489
+val_353	353
+val_373	373
+val_272	272
+val_138	138
+val_217	217
+val_84	84
+val_348	348
+val_466	466
+val_58	58
+val_8	8
+val_411	411
+val_230	230
+val_208	208
+val_348	348
+val_24	24
+val_463	463
+val_431	431
+val_179	179
+val_172	172
+val_42	42
+val_129	129
+val_158	158
+val_119	119
+val_496	496
+val_0	0
+val_322	322
+val_197	197
+val_468	468
+val_393	393
+val_454	454
+val_100	100
+val_298	298
+val_199	199
+val_191	191
+val_418	418
+val_96	96
+val_26	26
+val_165	165
+val_327	327
+val_230	230
+val_205	205
+val_120	120
+val_131	131
+val_51	51
+val_404	404
+val_43	43
+val_436	436
+val_156	156
+val_469	469
+val_468	468
+val_308	308
+val_95	95
+val_196	196
+val_288	288
+val_481	481
+val_457	457
+val_98	98
+val_282	282
+val_197	197
+val_187	187
+val_318	318
+val_318	318
+val_409	409
+val_470	470
+val_137	137
+val_369	369
+val_316	316
+val_169	169
+val_413	413
+val_85	85
+val_77	77
+val_0	0
+val_490	490
+val_87	87
+val_364	364
+val_179	179
+val_118	118
+val_134	134
+val_395	395
+val_282	282
+val_138	138
+val_238	238
+val_419	419
+val_15	15
+val_118	118
+val_72	72
+val_90	90
+val_307	307
+val_19	19
+val_435	435
+val_10	10
+val_277	277
+val_273	273
+val_306	306
+val_224	224
+val_309	309
+val_389	389
+val_327	327
+val_242	242
+val_369	369
+val_392	392
+val_272	272
+val_331	331
+val_401	401
+val_242	242
+val_452	452
+val_177	177
+val_226	226
+val_5	5
+val_497	497
+val_402	402
+val_396	396
+val_317	317
+val_395	395
+val_58	58
+val_35	35
+val_336	336
+val_95	95
+val_11	11
+val_168	168
+val_34	34
+val_229	229
+val_233	233
+val_143	143
+val_472	472
+val_322	322
+val_498	498
+val_160	160
+val_195	195
+val_42	42
+val_321	321
+val_430	430
+val_119	119
+val_489	489
+val_458	458
+val_78	78
+val_76	76
+val_41	41
+val_223	223
+val_492	492
+val_149	149
+val_449	449
+val_218	218
+val_228	228
+val_138	138
+val_453	453
+val_30	30
+val_209	209
+val_64	64
+val_468	468
+val_76	76
+val_74	74
+val_342	342
+val_69	69
+val_230	230
+val_33	33
+val_368	368
+val_103	103
+val_296	296
+val_113	113
+val_216	216
+val_367	367
+val_344	344
+val_167	167
+val_274	274
+val_219	219
+val_239	239
+val_485	485
+val_116	116
+val_223	223
+val_256	256
+val_263	263
+val_70	70
+val_487	487
+val_480	480
+val_401	401
+val_288	288
+val_191	191
+val_5	5
+val_244	244
+val_438	438
+val_128	128
+val_467	467
+val_432	432
+val_202	202
+val_316	316
+val_229	229
+val_469	469
+val_463	463
+val_280	280
+val_2	2
+val_35	35
+val_283	283
+val_331	331
+val_235	235
+val_80	80
+val_44	44
+val_193	193
+val_321	321
+val_335	335
+val_104	104
+val_466	466
+val_366	366
+val_175	175
+val_403	403
+val_483	483
+val_53	53
+val_105	105
+val_257	257
+val_406	406
+val_409	409
+val_190	190
+val_406	406
+val_401	401
+val_114	114
+val_258	258
+val_90	90
+val_203	203
+val_262	262
+val_348	348
+val_424	424
+val_12	12
+val_396	396
+val_201	201
+val_217	217
+val_164	164
+val_431	431
+val_454	454
+val_478	478
+val_298	298
+val_125	125
+val_431	431
+val_164	164
+val_424	424
+val_187	187
+val_382	382
+val_5	5
+val_70	70
+val_397	397
+val_480	480
+val_291	291
+val_24	24
+val_351	351
+val_255	255
+val_104	104
+val_70	70
+val_163	163
+val_438	438
+val_119	119
+val_414	414
+val_200	200
+val_491	491
+val_237	237
+val_439	439
+val_360	360
+val_248	248
+val_479	479
+val_305	305
+val_417	417
+val_199	199
+val_444	444
+val_120	120
+val_429	429
+val_169	169
+val_443	443
+val_323	323
+val_325	325
+val_277	277
+val_230	230
+val_478	478
+val_178	178
+val_468	468
+val_310	310
+val_317	317
+val_333	333
+val_493	493
+val_460	460
+val_207	207
+val_249	249
+val_265	265
+val_480	480
+val_83	83
+val_136	136
+val_353	353
+val_172	172
+val_214	214
+val_462	462
+val_233	233
+val_406	406
+val_133	133
+val_175	175
+val_189	189
+val_454	454
+val_375	375
+val_401	401
+val_421	421
+val_407	407
+val_384	384
+val_256	256
+val_26	26
+val_134	134
+val_67	67
+val_384	384
+val_379	379
+val_18	18
+val_462	462
+val_492	492
+val_100	100
+val_298	298
+val_9	9
+val_341	341
+val_498	498
+val_146	146
+val_458	458
+val_362	362
+val_186	186
+val_285	285
+val_348	348
+val_167	167
+val_18	18
+val_273	273
+val_183	183
+val_281	281
+val_344	344
+val_97	97
+val_469	469
+val_315	315
+val_84	84
+val_28	28
+val_37	37
+val_448	448
+val_152	152
+val_348	348
+val_307	307
+val_194	194
+val_414	414
+val_477	477
+val_222	222
+val_126	126
+val_90	90
+val_169	169
+val_403	403
+val_400	400
+val_200	200
+val_97	97

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/join0.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join0.q.java1.7.out b/ql/src/test/results/clientpositive/join0.q.java1.7.out
deleted file mode 100644
index 343f8a4..0000000
--- a/ql/src/test/results/clientpositive/join0.q.java1.7.out
+++ /dev/null
@@ -1,240 +0,0 @@
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key < 10) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string)
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key < 10) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-              sort order: ++++
-              Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}},{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COM
 PLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}}],"Reduce Operator Tree:":{"Join Operator":{"condition map:":[{"":"Inner Join 0 to 1"}],"keys:":{},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","outp
 ut format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"}}}}}}},"Stage-2":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)","sort order:":"++++","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE"}}}}],"Reduce Operator Tree:":{"Select Operator":{"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoo
 p.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	9	val_9
-0	val_0	9	val_9
-0	val_0	9	val_9
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	2	val_2
-2	val_2	4	val_4
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	8	val_8
-2	val_2	9	val_9
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	2	val_2
-4	val_4	4	val_4
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	8	val_8
-4	val_4	9	val_9
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	9	val_9
-5	val_5	9	val_9
-5	val_5	9	val_9
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	2	val_2
-8	val_8	4	val_4
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	8	val_8
-8	val_8	9	val_9
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	2	val_2
-9	val_9	4	val_4
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	8	val_8
-9	val_9	9	val_9

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/join0.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join0.q.java1.8.out b/ql/src/test/results/clientpositive/join0.q.java1.8.out
deleted file mode 100644
index 343f8a4..0000000
--- a/ql/src/test/results/clientpositive/join0.q.java1.8.out
+++ /dev/null
@@ -1,240 +0,0 @@
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key < 10) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string)
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key < 10) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Inner Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-              sort order: ++++
-              Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-{"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-2":{"DEPENDENT STAGES":"Stage-1"},"Stage-0":{"DEPENDENT STAGES":"Stage-2"}},"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}},{"TableScan":{"alias:":"src","Statistics:":"Num rows: 500 Data size: 5312 Basic stats: COM
 PLETE Column stats: NONE","children":{"Filter Operator":{"predicate:":"(key < 10) (type: boolean)","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Select Operator":{"expressions:":"key (type: string), value (type: string)","outputColumnNames:":["_col0","_col1"],"Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","children":{"Reduce Output Operator":{"sort order:":"","Statistics:":"Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE","value expressions:":"_col0 (type: string), _col1 (type: string)"}}}}}}}}],"Reduce Operator Tree:":{"Join Operator":{"condition map:":[{"":"Inner Join 0 to 1"}],"keys:":{},"outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","table:":{"input format:":"org.apache.hadoop.mapred.SequenceFileInputFormat","outp
 ut format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe"}}}}}}},"Stage-2":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"children":{"Reduce Output Operator":{"key expressions:":"_col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)","sort order:":"++++","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE"}}}}],"Reduce Operator Tree:":{"Select Operator":{"expressions:":"KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)","outputColumnNames:":["_col0","_col1","_col2","_col3"],"Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","children":{"File Output Operator":{"compressed:":"false","Statistics:":"Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE","table:":{"input format:":"org.apache.hadoo
 p.mapred.SequenceFileInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}}}}}}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}}}
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	9	val_9
-0	val_0	9	val_9
-0	val_0	9	val_9
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	2	val_2
-2	val_2	4	val_4
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	8	val_8
-2	val_2	9	val_9
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	2	val_2
-4	val_4	4	val_4
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	8	val_8
-4	val_4	9	val_9
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	9	val_9
-5	val_5	9	val_9
-5	val_5	9	val_9
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	2	val_2
-8	val_8	4	val_4
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	8	val_8
-8	val_8	9	val_9
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	2	val_2
-9	val_9	4	val_4
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	8	val_8
-9	val_9	9	val_9


[63/66] [abbrv] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
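Every hunk in this commit makes the same mechanical change: the COLUMN_STATS_ACCURATE property is now rendered with its JSON keys in sorted order, "BASIC_STATS" before "COLUMN_STATS". The diffs suggest the root cause is map iteration order: JDK8 changed HashMap iteration order relative to JDK7, so output produced from an unsorted map no longer matched the golden files. The Java sketch below is illustrative only, not the actual HIVE-13409 patch; toJson is a hypothetical helper that prints a map's entries in iteration order:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class StatsOrderDemo {
        // Hypothetical helper: renders a String map as a JSON object,
        // emitting entries in the map's iteration order. Values are assumed
        // to already be valid JSON fragments.
        static String toJson(Map<String, String> m) {
            StringBuilder sb = new StringBuilder("{");
            boolean first = true;
            for (Map.Entry<String, String> e : m.entrySet()) {
                if (!first) {
                    sb.append(',');
                }
                sb.append('"').append(e.getKey()).append("\":").append(e.getValue());
                first = false;
            }
            return sb.append('}').toString();
        }

        public static void main(String[] args) {
            // Populate the flags in an arbitrary insertion order; with a
            // plain HashMap the printed order would depend on the JDK's
            // internal hashing, which differs between JDK7 and JDK8.
            Map<String, String> accurate = new LinkedHashMap<>();
            accurate.put("COLUMN_STATS", "{\"key\":\"true\",\"value\":\"true\"}");
            accurate.put("BASIC_STATS", "\"true\"");

            // Copying into a TreeMap sorts the keys alphabetically, so the
            // rendered string is identical on every JDK.
            System.out.println(toJson(new TreeMap<>(accurate)));
            // prints: {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
        }
    }

Any fixed key order would make the output deterministic; alphabetical sorting is simply the choice the updated golden files below reflect.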
http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join34.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join34.q.out b/ql/src/test/results/clientpositive/spark/join34.q.out
index 235d36a..ebd9c89 100644
--- a/ql/src/test/results/clientpositive/spark/join34.q.out
+++ b/ql/src/test/results/clientpositive/spark/join34.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -98,7 +98,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -150,7 +150,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -170,7 +170,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -222,7 +222,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -242,7 +242,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join35.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join35.q.out b/ql/src/test/results/clientpositive/spark/join35.q.out
index 7b873c6..d14dadf 100644
--- a/ql/src/test/results/clientpositive/spark/join35.q.out
+++ b/ql/src/test/results/clientpositive/spark/join35.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -106,7 +106,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -164,7 +164,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -184,7 +184,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -236,7 +236,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -256,7 +256,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join9.q.out b/ql/src/test/results/clientpositive/spark/join9.q.out
index 4119855..05aa50b 100644
--- a/ql/src/test/results/clientpositive/spark/join9.q.out
+++ b/ql/src/test/results/clientpositive/spark/join9.q.out
@@ -64,7 +64,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -135,7 +135,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -155,7 +155,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out b/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
index 4d6d39d..5d7cecf 100644
--- a/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -82,7 +82,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -129,7 +129,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -149,7 +149,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -247,7 +247,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -731,7 +731,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
index b528357..ee70e71 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
@@ -151,7 +151,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -197,7 +197,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -243,7 +243,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -289,7 +289,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
index 3d90dc4..6cfde3f 100644
--- a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
@@ -63,7 +63,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -83,7 +83,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -138,7 +138,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -184,7 +184,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -370,7 +370,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -416,7 +416,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -462,7 +462,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -508,7 +508,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -582,7 +582,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -602,7 +602,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -778,7 +778,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -798,7 +798,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -853,7 +853,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -899,7 +899,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1081,7 +1081,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1127,7 +1127,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1199,7 +1199,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1219,7 +1219,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
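
Every hunk in this change is mechanical and identical in kind: inside the
COLUMN_STATS_ACCURATE table/partition property, the "BASIC_STATS" key now
serializes before the "COLUMN_STATS" key, i.e. the JSON keys appear in
alphabetical order. Below is a minimal sketch of how such deterministic key
ordering can arise, assuming (purely for illustration) that the stats flags
are collected in a java.util.TreeMap before being rendered; this is not
necessarily how Hive itself builds the property value:

    import java.util.Map;
    import java.util.TreeMap;

    public class StatsKeyOrder {
        public static void main(String[] args) {
            // TreeMap iterates keys in natural (alphabetical) order, so
            // "BASIC_STATS" always comes before "COLUMN_STATS".
            Map<String, Object> accurate = new TreeMap<>();
            Map<String, String> columnStats = new TreeMap<>();
            columnStats.put("key", "true");
            columnStats.put("value", "true");
            accurate.put("COLUMN_STATS", columnStats);
            accurate.put("BASIC_STATS", "true");
            // Prints: {BASIC_STATS=true, COLUMN_STATS={key=true, value=true}}
            System.out.println(accurate);
        }
    }

Whatever the actual mechanism, a stable key order keeps EXPLAIN EXTENDED
output deterministic across runs, which is what these regenerated .q.out
golden files record.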

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
index aea47f7..1900ed6 100644
--- a/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
@@ -50,7 +50,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -70,7 +70,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -120,7 +120,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -140,7 +140,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -237,7 +237,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -283,7 +283,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -329,7 +329,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -375,7 +375,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out b/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
index 4aecb73..ec43c12 100644
--- a/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
@@ -181,7 +181,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -201,7 +201,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -255,7 +255,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -300,7 +300,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -345,7 +345,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -390,7 +390,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -556,7 +556,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -576,7 +576,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -630,7 +630,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -676,7 +676,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -722,7 +722,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -768,7 +768,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -945,7 +945,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -965,7 +965,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1019,7 +1019,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1064,7 +1064,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1109,7 +1109,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1154,7 +1154,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1334,7 +1334,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1354,7 +1354,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1400,7 +1400,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1420,7 +1420,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1549,7 +1549,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1569,7 +1569,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1616,7 +1616,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1636,7 +1636,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1755,7 +1755,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1775,7 +1775,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out
index cd16787..cbebbdd 100644
--- a/ql/src/test/results/clientpositive/spark/pcr.q.out
+++ b/ql/src/test/results/clientpositive/spark/pcr.q.out
@@ -3942,7 +3942,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -4077,7 +4077,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -4123,7 +4123,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -4262,7 +4262,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -4308,7 +4308,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out b/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
index 6b1cadf..fbba885 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -87,7 +87,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -141,7 +141,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -161,7 +161,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -354,7 +354,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -374,7 +374,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -428,7 +428,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -448,7 +448,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -641,7 +641,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -661,7 +661,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -715,7 +715,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -735,7 +735,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -928,7 +928,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -948,7 +948,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -1002,7 +1002,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1022,7 +1022,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
index 1149f16..9629768 100644
--- a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
@@ -63,7 +63,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -83,7 +83,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -138,7 +138,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -184,7 +184,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -230,7 +230,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -276,7 +276,7 @@ STAGE PLANS:
                     ds 2008-04-09
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -473,7 +473,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -519,7 +519,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -591,7 +591,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -611,7 +611,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -778,7 +778,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -798,7 +798,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'
@@ -853,7 +853,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -899,7 +899,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1085,7 +1085,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1131,7 +1131,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 12
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1203,7 +1203,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1223,7 +1223,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count -1
                       columns key,value
                       columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/sample1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample1.q.out b/ql/src/test/results/clientpositive/spark/sample1.q.out
index eb9d5f6..4bd5c8c 100644
--- a/ql/src/test/results/clientpositive/spark/sample1.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample1.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/sample2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample2.q.out b/ql/src/test/results/clientpositive/spark/sample2.q.out
index b13f818..fe9e2f5 100644
--- a/ql/src/test/results/clientpositive/spark/sample2.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample2.q.out
@@ -83,7 +83,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -104,7 +104,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/sample4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample4.q.out b/ql/src/test/results/clientpositive/spark/sample4.q.out
index 69066c1..987a445 100644
--- a/ql/src/test/results/clientpositive/spark/sample4.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample4.q.out
@@ -83,7 +83,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -104,7 +104,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/sample5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample5.q.out b/ql/src/test/results/clientpositive/spark/sample5.q.out
index 819939c..77477ba 100644
--- a/ql/src/test/results/clientpositive/spark/sample5.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample5.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -105,7 +105,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/sample6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample6.q.out b/ql/src/test/results/clientpositive/spark/sample6.q.out
index bf06004..2ed7d7a 100644
--- a/ql/src/test/results/clientpositive/spark/sample6.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample6.q.out
@@ -81,7 +81,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -102,7 +102,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value
@@ -472,7 +472,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -493,7 +493,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value
@@ -847,7 +847,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -868,7 +868,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value
@@ -1475,7 +1475,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -1496,7 +1496,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value
@@ -1946,7 +1946,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -1967,7 +1967,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value
@@ -2404,7 +2404,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 4
                     bucket_field_name key
                     columns key,value
@@ -2425,7 +2425,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 4
                       bucket_field_name key
                       columns key,value
@@ -2450,7 +2450,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 4
                     bucket_field_name key
                     columns key,value
@@ -2471,7 +2471,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 4
                       bucket_field_name key
                       columns key,value
@@ -2709,7 +2709,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 4
                     bucket_field_name key
                     columns key,value
@@ -2730,7 +2730,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 4
                       bucket_field_name key
                       columns key,value

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/spark/sample7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample7.q.out b/ql/src/test/results/clientpositive/spark/sample7.q.out
index a821c76..784000d 100644
--- a/ql/src/test/results/clientpositive/spark/sample7.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample7.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count 2
                     bucket_field_name key
                     columns key,value
@@ -103,7 +103,7 @@ STAGE PLANS:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                     properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                       bucket_count 2
                       bucket_field_name key
                       columns key,value

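The COLUMN_STATS_ACCURATE hunks above are pure key-order churn: the serialized JSON carries the same BASIC_STATS/COLUMN_STATS payload, only emitted with its keys in alphabetical order. A minimal Java sketch of how such a flip can happen, assuming the property map moved from insertion-ordered to sorted iteration (an illustration, not Hive's actual serialization code):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.TreeMap;
    import java.util.stream.Collectors;

    public class StatsJsonOrder {
        // Render a map as a one-line JSON object, preserving iteration order.
        static String render(Map<String, String> props) {
            return props.entrySet().stream()
                    .map(e -> "\"" + e.getKey() + "\":" + e.getValue())
                    .collect(Collectors.joining(",", "{", "}"));
        }

        public static void main(String[] args) {
            Map<String, String> m = new LinkedHashMap<>();
            m.put("COLUMN_STATS", "{\"key\":\"true\",\"value\":\"true\"}");
            m.put("BASIC_STATS", "\"true\"");
            System.out.println(render(m));                // old insertion order
            System.out.println(render(new TreeMap<>(m))); // new, sorted order
        }
    }

Because golden files are diffed textually, even a semantics-preserving ordering change like this has to be regenerated across every affected .q.out.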

[08/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
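The query88 and query90 diffs in this message contain no plan-shape changes at all: row estimates, predicates, and join conditions are untouched, and only the numeric suffixes of operator names drop (by 7 in query88, by 1 in query90). That is consistent with the patch's title: once CBO has optimized the plan, the legacy ConstantPropagate rewrite no longer runs over it, so fewer operators are created earlier in compilation. A hypothetical sketch of that kind of gating (names are illustrative, not the real org.apache.hadoop.hive.ql Optimizer API):

    import java.util.ArrayList;
    import java.util.List;

    public class OptimizerSketch {
        interface Transform { String name(); }

        // When CBO already optimized the plan, leave the legacy
        // ConstantPropagate rewrite out of the pass list.
        static List<Transform> buildPasses(boolean planOptimizedByCbo) {
            List<Transform> passes = new ArrayList<>();
            passes.add(() -> "PartitionPruner");
            if (!planOptimizedByCbo) {
                passes.add(() -> "ConstantPropagate"); // non-CBO path only
            }
            passes.add(() -> "Vectorizer");
            return passes;
        }

        public static void main(String[] args) {
            // prints PartitionPruner, Vectorizer -- no ConstantPropagate
            buildPasses(true).forEach(t -> System.out.println(t.name()));
        }
    }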
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query88.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query88.q.out b/ql/src/test/results/clientpositive/perf/query88.q.out
index e3f69d3..9981af5 100644
--- a/ql/src/test/results/clientpositive/perf/query88.q.out
+++ b/ql/src/test/results/clientpositive/perf/query88.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[354][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5, $hdt$_6, $hdt$_7]] in Stage 'Reducer 6' is a cross product
+Warning: Shuffle Join MERGEJOIN[347][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5, $hdt$_6, $hdt$_7]] in Stage 'Reducer 6' is a cross product
 PREHOOK: query: explain
 select  *
 from
@@ -225,55 +225,55 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 6
-      File Output Operator [FS_225]
-        Select Operator [SEL_224] (rows=7 width=8)
+      File Output Operator [FS_218]
+        Select Operator [SEL_217] (rows=7 width=8)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-          Merge Join Operator [MERGEJOIN_354] (rows=7 width=8)
+          Merge Join Operator [MERGEJOIN_347] (rows=7 width=8)
             Conds:(Inner),(Inner),(Inner),(Inner),(Inner),(Inner),(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
           <-Reducer 14 [SIMPLE_EDGE]
-            SHUFFLE [RS_216]
+            SHUFFLE [RS_209]
               Group By Operator [GBY_50] (rows=1 width=8)
                 Output:["_col0"],aggregations:["count(VALUE._col0)"]
               <-Reducer 13 [SIMPLE_EDGE]
                 SHUFFLE [RS_49]
                   Group By Operator [GBY_48] (rows=1 width=8)
                     Output:["_col0"],aggregations:["count()"]
-                    Merge Join Operator [MERGEJOIN_335] (rows=766650239 width=88)
+                    Merge Join Operator [MERGEJOIN_328] (rows=766650239 width=88)
                       Conds:RS_44._col2=RS_45._col0(Inner)
                     <-Map 17 [SIMPLE_EDGE]
                       SHUFFLE [RS_45]
                         PartitionCols:_col0
                         Select Operator [SEL_37] (rows=852 width=1910)
                           Output:["_col0"]
-                          Filter Operator [FIL_305] (rows=852 width=1910)
+                          Filter Operator [FIL_298] (rows=852 width=1910)
                             predicate:((s_store_name = 'ese') and s_store_sk is not null)
                             TableScan [TS_35] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name"]
                     <-Reducer 12 [SIMPLE_EDGE]
                       SHUFFLE [RS_44]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_334] (rows=696954748 width=88)
+                        Merge Join Operator [MERGEJOIN_327] (rows=696954748 width=88)
                           Conds:RS_41._col1=RS_42._col0(Inner),Output:["_col2"]
                         <-Map 16 [SIMPLE_EDGE]
                           SHUFFLE [RS_42]
                             PartitionCols:_col0
                             Select Operator [SEL_34] (rows=3600 width=107)
                               Output:["_col0"]
-                              Filter Operator [FIL_304] (rows=3600 width=107)
+                              Filter Operator [FIL_297] (rows=3600 width=107)
                                 predicate:((((hd_dep_count = 3) and (hd_vehicle_count <= 5)) or ((hd_dep_count = 0) and (hd_vehicle_count <= 2)) or ((hd_dep_count = 1) and (hd_vehicle_count <= 3))) and hd_demo_sk is not null)
                                 TableScan [TS_32] (rows=7200 width=107)
                                   default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count","hd_vehicle_count"]
                         <-Reducer 11 [SIMPLE_EDGE]
                           SHUFFLE [RS_41]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_333] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_326] (rows=633595212 width=88)
                               Conds:RS_38._col0=RS_39._col0(Inner),Output:["_col1","_col2"]
                             <-Map 10 [SIMPLE_EDGE]
                               SHUFFLE [RS_38]
                                 PartitionCols:_col0
                                 Select Operator [SEL_28] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_302] (rows=575995635 width=88)
+                                  Filter Operator [FIL_295] (rows=575995635 width=88)
                                     predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_26] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
@@ -282,54 +282,54 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_31] (rows=14400 width=471)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_303] (rows=14400 width=471)
+                                  Filter Operator [FIL_296] (rows=14400 width=471)
                                     predicate:((t_hour = 12) and (t_minute < 30) and t_time_sk is not null)
                                     TableScan [TS_29] (rows=86400 width=471)
                                       default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
           <-Reducer 22 [SIMPLE_EDGE]
-            SHUFFLE [RS_217]
+            SHUFFLE [RS_210]
               Group By Operator [GBY_76] (rows=1 width=8)
                 Output:["_col0"],aggregations:["count(VALUE._col0)"]
               <-Reducer 21 [SIMPLE_EDGE]
                 SHUFFLE [RS_75]
                   Group By Operator [GBY_74] (rows=1 width=8)
                     Output:["_col0"],aggregations:["count()"]
-                    Merge Join Operator [MERGEJOIN_338] (rows=766650239 width=88)
+                    Merge Join Operator [MERGEJOIN_331] (rows=766650239 width=88)
                       Conds:RS_70._col2=RS_71._col0(Inner)
                     <-Map 25 [SIMPLE_EDGE]
                       SHUFFLE [RS_71]
                         PartitionCols:_col0
                         Select Operator [SEL_63] (rows=852 width=1910)
                           Output:["_col0"]
-                          Filter Operator [FIL_309] (rows=852 width=1910)
+                          Filter Operator [FIL_302] (rows=852 width=1910)
                             predicate:((s_store_name = 'ese') and s_store_sk is not null)
                             TableScan [TS_61] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name"]
                     <-Reducer 20 [SIMPLE_EDGE]
                       SHUFFLE [RS_70]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_337] (rows=696954748 width=88)
+                        Merge Join Operator [MERGEJOIN_330] (rows=696954748 width=88)
                           Conds:RS_67._col1=RS_68._col0(Inner),Output:["_col2"]
                         <-Map 24 [SIMPLE_EDGE]
                           SHUFFLE [RS_68]
                             PartitionCols:_col0
                             Select Operator [SEL_60] (rows=3600 width=107)
                               Output:["_col0"]
-                              Filter Operator [FIL_308] (rows=3600 width=107)
+                              Filter Operator [FIL_301] (rows=3600 width=107)
                                 predicate:((((hd_dep_count = 3) and (hd_vehicle_count <= 5)) or ((hd_dep_count = 0) and (hd_vehicle_count <= 2)) or ((hd_dep_count = 1) and (hd_vehicle_count <= 3))) and hd_demo_sk is not null)
                                 TableScan [TS_58] (rows=7200 width=107)
                                   default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count","hd_vehicle_count"]
                         <-Reducer 19 [SIMPLE_EDGE]
                           SHUFFLE [RS_67]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_336] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_329] (rows=633595212 width=88)
                               Conds:RS_64._col0=RS_65._col0(Inner),Output:["_col1","_col2"]
                             <-Map 18 [SIMPLE_EDGE]
                               SHUFFLE [RS_64]
                                 PartitionCols:_col0
                                 Select Operator [SEL_54] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_306] (rows=575995635 width=88)
+                                  Filter Operator [FIL_299] (rows=575995635 width=88)
                                     predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_52] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
@@ -338,54 +338,54 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_57] (rows=14400 width=471)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_307] (rows=14400 width=471)
+                                  Filter Operator [FIL_300] (rows=14400 width=471)
                                     predicate:((t_hour = 11) and (t_minute >= 30) and t_time_sk is not null)
                                     TableScan [TS_55] (rows=86400 width=471)
                                       default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
           <-Reducer 30 [SIMPLE_EDGE]
-            SHUFFLE [RS_218]
+            SHUFFLE [RS_211]
               Group By Operator [GBY_102] (rows=1 width=8)
                 Output:["_col0"],aggregations:["count(VALUE._col0)"]
               <-Reducer 29 [SIMPLE_EDGE]
                 SHUFFLE [RS_101]
                   Group By Operator [GBY_100] (rows=1 width=8)
                     Output:["_col0"],aggregations:["count()"]
-                    Merge Join Operator [MERGEJOIN_341] (rows=766650239 width=88)
+                    Merge Join Operator [MERGEJOIN_334] (rows=766650239 width=88)
                       Conds:RS_96._col2=RS_97._col0(Inner)
                     <-Map 33 [SIMPLE_EDGE]
                       SHUFFLE [RS_97]
                         PartitionCols:_col0
                         Select Operator [SEL_89] (rows=852 width=1910)
                           Output:["_col0"]
-                          Filter Operator [FIL_313] (rows=852 width=1910)
+                          Filter Operator [FIL_306] (rows=852 width=1910)
                             predicate:((s_store_name = 'ese') and s_store_sk is not null)
                             TableScan [TS_87] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name"]
                     <-Reducer 28 [SIMPLE_EDGE]
                       SHUFFLE [RS_96]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_340] (rows=696954748 width=88)
+                        Merge Join Operator [MERGEJOIN_333] (rows=696954748 width=88)
                           Conds:RS_93._col1=RS_94._col0(Inner),Output:["_col2"]
                         <-Map 32 [SIMPLE_EDGE]
                           SHUFFLE [RS_94]
                             PartitionCols:_col0
                             Select Operator [SEL_86] (rows=3600 width=107)
                               Output:["_col0"]
-                              Filter Operator [FIL_312] (rows=3600 width=107)
+                              Filter Operator [FIL_305] (rows=3600 width=107)
                                 predicate:((((hd_dep_count = 3) and (hd_vehicle_count <= 5)) or ((hd_dep_count = 0) and (hd_vehicle_count <= 2)) or ((hd_dep_count = 1) and (hd_vehicle_count <= 3))) and hd_demo_sk is not null)
                                 TableScan [TS_84] (rows=7200 width=107)
                                   default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count","hd_vehicle_count"]
                         <-Reducer 27 [SIMPLE_EDGE]
                           SHUFFLE [RS_93]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_339] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_332] (rows=633595212 width=88)
                               Conds:RS_90._col0=RS_91._col0(Inner),Output:["_col1","_col2"]
                             <-Map 26 [SIMPLE_EDGE]
                               SHUFFLE [RS_90]
                                 PartitionCols:_col0
                                 Select Operator [SEL_80] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_310] (rows=575995635 width=88)
+                                  Filter Operator [FIL_303] (rows=575995635 width=88)
                                     predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_78] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
@@ -394,54 +394,54 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_83] (rows=14400 width=471)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_311] (rows=14400 width=471)
+                                  Filter Operator [FIL_304] (rows=14400 width=471)
                                     predicate:((t_hour = 11) and (t_minute < 30) and t_time_sk is not null)
                                     TableScan [TS_81] (rows=86400 width=471)
                                       default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
           <-Reducer 38 [SIMPLE_EDGE]
-            SHUFFLE [RS_219]
+            SHUFFLE [RS_212]
               Group By Operator [GBY_128] (rows=1 width=8)
                 Output:["_col0"],aggregations:["count(VALUE._col0)"]
               <-Reducer 37 [SIMPLE_EDGE]
                 SHUFFLE [RS_127]
                   Group By Operator [GBY_126] (rows=1 width=8)
                     Output:["_col0"],aggregations:["count()"]
-                    Merge Join Operator [MERGEJOIN_344] (rows=766650239 width=88)
+                    Merge Join Operator [MERGEJOIN_337] (rows=766650239 width=88)
                       Conds:RS_122._col2=RS_123._col0(Inner)
                     <-Map 41 [SIMPLE_EDGE]
                       SHUFFLE [RS_123]
                         PartitionCols:_col0
                         Select Operator [SEL_115] (rows=852 width=1910)
                           Output:["_col0"]
-                          Filter Operator [FIL_317] (rows=852 width=1910)
+                          Filter Operator [FIL_310] (rows=852 width=1910)
                             predicate:((s_store_name = 'ese') and s_store_sk is not null)
                             TableScan [TS_113] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name"]
                     <-Reducer 36 [SIMPLE_EDGE]
                       SHUFFLE [RS_122]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_343] (rows=696954748 width=88)
+                        Merge Join Operator [MERGEJOIN_336] (rows=696954748 width=88)
                           Conds:RS_119._col1=RS_120._col0(Inner),Output:["_col2"]
                         <-Map 40 [SIMPLE_EDGE]
                           SHUFFLE [RS_120]
                             PartitionCols:_col0
                             Select Operator [SEL_112] (rows=3600 width=107)
                               Output:["_col0"]
-                              Filter Operator [FIL_316] (rows=3600 width=107)
+                              Filter Operator [FIL_309] (rows=3600 width=107)
                                 predicate:((((hd_dep_count = 3) and (hd_vehicle_count <= 5)) or ((hd_dep_count = 0) and (hd_vehicle_count <= 2)) or ((hd_dep_count = 1) and (hd_vehicle_count <= 3))) and hd_demo_sk is not null)
                                 TableScan [TS_110] (rows=7200 width=107)
                                   default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count","hd_vehicle_count"]
                         <-Reducer 35 [SIMPLE_EDGE]
                           SHUFFLE [RS_119]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_342] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_335] (rows=633595212 width=88)
                               Conds:RS_116._col0=RS_117._col0(Inner),Output:["_col1","_col2"]
                             <-Map 34 [SIMPLE_EDGE]
                               SHUFFLE [RS_116]
                                 PartitionCols:_col0
                                 Select Operator [SEL_106] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_314] (rows=575995635 width=88)
+                                  Filter Operator [FIL_307] (rows=575995635 width=88)
                                     predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_104] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
@@ -450,54 +450,54 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_109] (rows=14400 width=471)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_315] (rows=14400 width=471)
+                                  Filter Operator [FIL_308] (rows=14400 width=471)
                                     predicate:((t_hour = 10) and (t_minute >= 30) and t_time_sk is not null)
                                     TableScan [TS_107] (rows=86400 width=471)
                                       default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
           <-Reducer 46 [SIMPLE_EDGE]
-            SHUFFLE [RS_220]
+            SHUFFLE [RS_213]
               Group By Operator [GBY_154] (rows=1 width=8)
                 Output:["_col0"],aggregations:["count(VALUE._col0)"]
               <-Reducer 45 [SIMPLE_EDGE]
                 SHUFFLE [RS_153]
                   Group By Operator [GBY_152] (rows=1 width=8)
                     Output:["_col0"],aggregations:["count()"]
-                    Merge Join Operator [MERGEJOIN_347] (rows=766650239 width=88)
+                    Merge Join Operator [MERGEJOIN_340] (rows=766650239 width=88)
                       Conds:RS_148._col2=RS_149._col0(Inner)
                     <-Map 49 [SIMPLE_EDGE]
                       SHUFFLE [RS_149]
                         PartitionCols:_col0
                         Select Operator [SEL_141] (rows=852 width=1910)
                           Output:["_col0"]
-                          Filter Operator [FIL_321] (rows=852 width=1910)
+                          Filter Operator [FIL_314] (rows=852 width=1910)
                             predicate:((s_store_name = 'ese') and s_store_sk is not null)
                             TableScan [TS_139] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name"]
                     <-Reducer 44 [SIMPLE_EDGE]
                       SHUFFLE [RS_148]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_346] (rows=696954748 width=88)
+                        Merge Join Operator [MERGEJOIN_339] (rows=696954748 width=88)
                           Conds:RS_145._col1=RS_146._col0(Inner),Output:["_col2"]
                         <-Map 48 [SIMPLE_EDGE]
                           SHUFFLE [RS_146]
                             PartitionCols:_col0
                             Select Operator [SEL_138] (rows=3600 width=107)
                               Output:["_col0"]
-                              Filter Operator [FIL_320] (rows=3600 width=107)
+                              Filter Operator [FIL_313] (rows=3600 width=107)
                                 predicate:((((hd_dep_count = 3) and (hd_vehicle_count <= 5)) or ((hd_dep_count = 0) and (hd_vehicle_count <= 2)) or ((hd_dep_count = 1) and (hd_vehicle_count <= 3))) and hd_demo_sk is not null)
                                 TableScan [TS_136] (rows=7200 width=107)
                                   default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count","hd_vehicle_count"]
                         <-Reducer 43 [SIMPLE_EDGE]
                           SHUFFLE [RS_145]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_345] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_338] (rows=633595212 width=88)
                               Conds:RS_142._col0=RS_143._col0(Inner),Output:["_col1","_col2"]
                             <-Map 42 [SIMPLE_EDGE]
                               SHUFFLE [RS_142]
                                 PartitionCols:_col0
                                 Select Operator [SEL_132] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_318] (rows=575995635 width=88)
+                                  Filter Operator [FIL_311] (rows=575995635 width=88)
                                     predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_130] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
@@ -506,54 +506,54 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_135] (rows=14400 width=471)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_319] (rows=14400 width=471)
+                                  Filter Operator [FIL_312] (rows=14400 width=471)
                                     predicate:((t_hour = 10) and (t_minute < 30) and t_time_sk is not null)
                                     TableScan [TS_133] (rows=86400 width=471)
                                       default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
           <-Reducer 5 [SIMPLE_EDGE]
-            SHUFFLE [RS_215]
+            SHUFFLE [RS_208]
               Group By Operator [GBY_24] (rows=1 width=8)
                 Output:["_col0"],aggregations:["count(VALUE._col0)"]
               <-Reducer 4 [SIMPLE_EDGE]
                 SHUFFLE [RS_23]
                   Group By Operator [GBY_22] (rows=1 width=8)
                     Output:["_col0"],aggregations:["count()"]
-                    Merge Join Operator [MERGEJOIN_332] (rows=766650239 width=88)
+                    Merge Join Operator [MERGEJOIN_325] (rows=766650239 width=88)
                       Conds:RS_18._col2=RS_19._col0(Inner)
                     <-Map 9 [SIMPLE_EDGE]
                       SHUFFLE [RS_19]
                         PartitionCols:_col0
                         Select Operator [SEL_11] (rows=852 width=1910)
                           Output:["_col0"]
-                          Filter Operator [FIL_301] (rows=852 width=1910)
+                          Filter Operator [FIL_294] (rows=852 width=1910)
                             predicate:((s_store_name = 'ese') and s_store_sk is not null)
                             TableScan [TS_9] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name"]
                     <-Reducer 3 [SIMPLE_EDGE]
                       SHUFFLE [RS_18]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_331] (rows=696954748 width=88)
+                        Merge Join Operator [MERGEJOIN_324] (rows=696954748 width=88)
                           Conds:RS_15._col1=RS_16._col0(Inner),Output:["_col2"]
                         <-Map 8 [SIMPLE_EDGE]
                           SHUFFLE [RS_16]
                             PartitionCols:_col0
                             Select Operator [SEL_8] (rows=3600 width=107)
                               Output:["_col0"]
-                              Filter Operator [FIL_300] (rows=3600 width=107)
+                              Filter Operator [FIL_293] (rows=3600 width=107)
                                 predicate:((((hd_dep_count = 3) and (hd_vehicle_count <= 5)) or ((hd_dep_count = 0) and (hd_vehicle_count <= 2)) or ((hd_dep_count = 1) and (hd_vehicle_count <= 3))) and hd_demo_sk is not null)
                                 TableScan [TS_6] (rows=7200 width=107)
                                   default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count","hd_vehicle_count"]
                         <-Reducer 2 [SIMPLE_EDGE]
                           SHUFFLE [RS_15]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_330] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_323] (rows=633595212 width=88)
                               Conds:RS_12._col0=RS_13._col0(Inner),Output:["_col1","_col2"]
                             <-Map 1 [SIMPLE_EDGE]
                               SHUFFLE [RS_12]
                                 PartitionCols:_col0
                                 Select Operator [SEL_2] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_298] (rows=575995635 width=88)
+                                  Filter Operator [FIL_291] (rows=575995635 width=88)
                                     predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_0] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
@@ -562,54 +562,54 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_5] (rows=14400 width=471)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_299] (rows=14400 width=471)
+                                  Filter Operator [FIL_292] (rows=14400 width=471)
                                     predicate:((t_hour = 8) and (t_minute >= 30) and t_time_sk is not null)
                                     TableScan [TS_3] (rows=86400 width=471)
                                       default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
           <-Reducer 54 [SIMPLE_EDGE]
-            SHUFFLE [RS_221]
+            SHUFFLE [RS_214]
               Group By Operator [GBY_180] (rows=1 width=8)
                 Output:["_col0"],aggregations:["count(VALUE._col0)"]
               <-Reducer 53 [SIMPLE_EDGE]
                 SHUFFLE [RS_179]
                   Group By Operator [GBY_178] (rows=1 width=8)
                     Output:["_col0"],aggregations:["count()"]
-                    Merge Join Operator [MERGEJOIN_350] (rows=766650239 width=88)
+                    Merge Join Operator [MERGEJOIN_343] (rows=766650239 width=88)
                       Conds:RS_174._col2=RS_175._col0(Inner)
                     <-Map 57 [SIMPLE_EDGE]
                       SHUFFLE [RS_175]
                         PartitionCols:_col0
                         Select Operator [SEL_167] (rows=852 width=1910)
                           Output:["_col0"]
-                          Filter Operator [FIL_325] (rows=852 width=1910)
+                          Filter Operator [FIL_318] (rows=852 width=1910)
                             predicate:((s_store_name = 'ese') and s_store_sk is not null)
                             TableScan [TS_165] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name"]
                     <-Reducer 52 [SIMPLE_EDGE]
                       SHUFFLE [RS_174]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_349] (rows=696954748 width=88)
+                        Merge Join Operator [MERGEJOIN_342] (rows=696954748 width=88)
                           Conds:RS_171._col1=RS_172._col0(Inner),Output:["_col2"]
                         <-Map 56 [SIMPLE_EDGE]
                           SHUFFLE [RS_172]
                             PartitionCols:_col0
                             Select Operator [SEL_164] (rows=3600 width=107)
                               Output:["_col0"]
-                              Filter Operator [FIL_324] (rows=3600 width=107)
+                              Filter Operator [FIL_317] (rows=3600 width=107)
                                 predicate:((((hd_dep_count = 3) and (hd_vehicle_count <= 5)) or ((hd_dep_count = 0) and (hd_vehicle_count <= 2)) or ((hd_dep_count = 1) and (hd_vehicle_count <= 3))) and hd_demo_sk is not null)
                                 TableScan [TS_162] (rows=7200 width=107)
                                   default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count","hd_vehicle_count"]
                         <-Reducer 51 [SIMPLE_EDGE]
                           SHUFFLE [RS_171]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_348] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_341] (rows=633595212 width=88)
                               Conds:RS_168._col0=RS_169._col0(Inner),Output:["_col1","_col2"]
                             <-Map 50 [SIMPLE_EDGE]
                               SHUFFLE [RS_168]
                                 PartitionCols:_col0
                                 Select Operator [SEL_158] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_322] (rows=575995635 width=88)
+                                  Filter Operator [FIL_315] (rows=575995635 width=88)
                                     predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_156] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
@@ -618,54 +618,54 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_161] (rows=14400 width=471)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_323] (rows=14400 width=471)
+                                  Filter Operator [FIL_316] (rows=14400 width=471)
                                     predicate:((t_hour = 9) and (t_minute >= 30) and t_time_sk is not null)
                                     TableScan [TS_159] (rows=86400 width=471)
                                       default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
           <-Reducer 62 [SIMPLE_EDGE]
-            SHUFFLE [RS_222]
+            SHUFFLE [RS_215]
               Group By Operator [GBY_206] (rows=1 width=8)
                 Output:["_col0"],aggregations:["count(VALUE._col0)"]
               <-Reducer 61 [SIMPLE_EDGE]
                 SHUFFLE [RS_205]
                   Group By Operator [GBY_204] (rows=1 width=8)
                     Output:["_col0"],aggregations:["count()"]
-                    Merge Join Operator [MERGEJOIN_353] (rows=766650239 width=88)
+                    Merge Join Operator [MERGEJOIN_346] (rows=766650239 width=88)
                       Conds:RS_200._col2=RS_201._col0(Inner)
                     <-Map 65 [SIMPLE_EDGE]
                       SHUFFLE [RS_201]
                         PartitionCols:_col0
                         Select Operator [SEL_193] (rows=852 width=1910)
                           Output:["_col0"]
-                          Filter Operator [FIL_329] (rows=852 width=1910)
+                          Filter Operator [FIL_322] (rows=852 width=1910)
                             predicate:((s_store_name = 'ese') and s_store_sk is not null)
                             TableScan [TS_191] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name"]
                     <-Reducer 60 [SIMPLE_EDGE]
                       SHUFFLE [RS_200]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_352] (rows=696954748 width=88)
+                        Merge Join Operator [MERGEJOIN_345] (rows=696954748 width=88)
                           Conds:RS_197._col1=RS_198._col0(Inner),Output:["_col2"]
                         <-Map 64 [SIMPLE_EDGE]
                           SHUFFLE [RS_198]
                             PartitionCols:_col0
                             Select Operator [SEL_190] (rows=3600 width=107)
                               Output:["_col0"]
-                              Filter Operator [FIL_328] (rows=3600 width=107)
+                              Filter Operator [FIL_321] (rows=3600 width=107)
                                 predicate:((((hd_dep_count = 3) and (hd_vehicle_count <= 5)) or ((hd_dep_count = 0) and (hd_vehicle_count <= 2)) or ((hd_dep_count = 1) and (hd_vehicle_count <= 3))) and hd_demo_sk is not null)
                                 TableScan [TS_188] (rows=7200 width=107)
                                   default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count","hd_vehicle_count"]
                         <-Reducer 59 [SIMPLE_EDGE]
                           SHUFFLE [RS_197]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_351] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_344] (rows=633595212 width=88)
                               Conds:RS_194._col0=RS_195._col0(Inner),Output:["_col1","_col2"]
                             <-Map 58 [SIMPLE_EDGE]
                               SHUFFLE [RS_194]
                                 PartitionCols:_col0
                                 Select Operator [SEL_184] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_326] (rows=575995635 width=88)
+                                  Filter Operator [FIL_319] (rows=575995635 width=88)
                                     predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_182] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
@@ -674,7 +674,7 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_187] (rows=14400 width=471)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_327] (rows=14400 width=471)
+                                  Filter Operator [FIL_320] (rows=14400 width=471)
                                     predicate:((t_hour = 9) and (t_minute < 30) and t_time_sk is not null)
                                     TableScan [TS_185] (rows=86400 width=471)
                                       default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]

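Every renamed operator in the query88 diff above shifts by exactly the same offset of 7 (MERGEJOIN_354 -> 347, RS_216 -> 209, FIL_305 -> 298), which is the signature of a single global allocation counter when an earlier pass stops consuming IDs. A toy illustration of that mechanism (not Hive's Operator class):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    public class OperatorIds {
        static final AtomicInteger COUNTER = new AtomicInteger();

        // Operator names like FIL_305 embed a global allocation counter.
        static String newOp(String prefix) {
            return prefix + "_" + COUNTER.getAndIncrement();
        }

        static List<String> plan(boolean runExtraPass) {
            COUNTER.set(300);            // arbitrary starting point
            List<String> ops = new ArrayList<>();
            if (runExtraPass) {
                for (int i = 0; i < 7; i++) {
                    newOp("CP");         // the pass burns 7 ids
                }
            }
            ops.add(newOp("FIL"));
            ops.add(newOp("MERGEJOIN"));
            return ops;
        }

        public static void main(String[] args) {
            System.out.println(plan(true));   // [FIL_307, MERGEJOIN_308]
            System.out.println(plan(false));  // [FIL_300, MERGEJOIN_301]
        }
    }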
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query90.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query90.q.out b/ql/src/test/results/clientpositive/perf/query90.q.out
index eea2a06..7108931 100644
--- a/ql/src/test/results/clientpositive/perf/query90.q.out
+++ b/ql/src/test/results/clientpositive/perf/query90.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[93][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 6' is a cross product
+Warning: Shuffle Join MERGEJOIN[92][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 6' is a cross product
 PREHOOK: query: explain select cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio from ( select count(*) amc from web_sales, household_demographics , time_dim, web_page where ws_sold_time_sk = time_dim.t_time_sk and ws_ship_hdemo_sk = household_demographics.hd_demo_sk and ws_web_page_sk = web_page.wp_web_page_sk and time_dim.t_hour between 6 and 6+1 and household_demographics.hd_dep_count = 8 and web_page.wp_char_count between 5000 and 5200) at, ( select count(*) pmc from web_sales, household_demographics , time_dim, web_page where ws_sold_time_sk = time_dim.t_time_sk and ws_ship_hdemo_sk = household_demographics.hd_demo_sk and ws_web_page_sk = web_page.wp_web_page_sk and time_dim.t_hour between 14 and 14+1 and household_demographics.hd_dep_count = 8 and web_page.wp_char_count between 5000 and 5200) pt order by am_pm_ratio limit 100
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select cast(amc as decimal(15,4))/cast(pmc as decimal(15,4)) am_pm_ratio from ( select count(*) amc from web_sales, household_demographics , time_dim, web_page where ws_sold_time_sk = time_dim.t_time_sk and ws_ship_hdemo_sk = household_demographics.hd_demo_sk and ws_web_page_sk = web_page.wp_web_page_sk and time_dim.t_hour between 6 and 6+1 and household_demographics.hd_dep_count = 8 and web_page.wp_char_count between 5000 and 5200) at, ( select count(*) pmc from web_sales, household_demographics , time_dim, web_page where ws_sold_time_sk = time_dim.t_time_sk and ws_ship_hdemo_sk = household_demographics.hd_demo_sk and ws_web_page_sk = web_page.wp_web_page_sk and time_dim.t_hour between 14 and 14+1 and household_demographics.hd_dep_count = 8 and web_page.wp_char_count between 5000 and 5200) pt order by am_pm_ratio limit 100
@@ -22,61 +22,61 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7
-      File Output Operator [FS_60]
-        Limit [LIM_59] (rows=1 width=8)
+      File Output Operator [FS_59]
+        Limit [LIM_58] (rows=1 width=8)
           Number of rows:100
-          Select Operator [SEL_58] (rows=1 width=8)
+          Select Operator [SEL_57] (rows=1 width=8)
             Output:["_col0"]
           <-Reducer 6 [SIMPLE_EDGE]
-            SHUFFLE [RS_57]
-              Select Operator [SEL_56] (rows=1 width=8)
+            SHUFFLE [RS_56]
+              Select Operator [SEL_55] (rows=1 width=8)
                 Output:["_col0"]
-                Merge Join Operator [MERGEJOIN_93] (rows=1 width=8)
+                Merge Join Operator [MERGEJOIN_92] (rows=1 width=8)
                   Conds:(Inner),Output:["_col0","_col1"]
                 <-Reducer 15 [SIMPLE_EDGE]
-                  SHUFFLE [RS_54]
+                  SHUFFLE [RS_53]
                     Group By Operator [GBY_50] (rows=1 width=8)
                       Output:["_col0"],aggregations:["count(VALUE._col0)"]
                     <-Reducer 14 [SIMPLE_EDGE]
                       SHUFFLE [RS_49]
                         Group By Operator [GBY_48] (rows=1 width=8)
                           Output:["_col0"],aggregations:["count()"]
-                          Merge Join Operator [MERGEJOIN_92] (rows=191667562 width=135)
+                          Merge Join Operator [MERGEJOIN_91] (rows=191667562 width=135)
                             Conds:RS_44._col1=RS_45._col0(Inner)
                           <-Map 18 [SIMPLE_EDGE]
                             SHUFFLE [RS_45]
                               PartitionCols:_col0
                               Select Operator [SEL_37] (rows=3600 width=107)
                                 Output:["_col0"]
-                                Filter Operator [FIL_86] (rows=3600 width=107)
+                                Filter Operator [FIL_85] (rows=3600 width=107)
                                   predicate:((hd_dep_count = 8) and hd_demo_sk is not null)
                                   TableScan [TS_35] (rows=7200 width=107)
                                     default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count"]
                           <-Reducer 13 [SIMPLE_EDGE]
                             SHUFFLE [RS_44]
                               PartitionCols:_col1
-                              Merge Join Operator [MERGEJOIN_91] (rows=174243235 width=135)
+                              Merge Join Operator [MERGEJOIN_90] (rows=174243235 width=135)
                                 Conds:RS_41._col0=RS_42._col0(Inner),Output:["_col1"]
                               <-Map 17 [SIMPLE_EDGE]
                                 SHUFFLE [RS_42]
                                   PartitionCols:_col0
                                   Select Operator [SEL_34] (rows=43200 width=471)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_85] (rows=43200 width=471)
+                                    Filter Operator [FIL_84] (rows=43200 width=471)
                                       predicate:(t_hour BETWEEN 14 AND 15 and t_time_sk is not null)
                                       TableScan [TS_32] (rows=86400 width=471)
                                         default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour"]
                               <-Reducer 12 [SIMPLE_EDGE]
                                 SHUFFLE [RS_41]
                                   PartitionCols:_col0
-                                  Merge Join Operator [MERGEJOIN_90] (rows=158402938 width=135)
+                                  Merge Join Operator [MERGEJOIN_89] (rows=158402938 width=135)
                                     Conds:RS_38._col2=RS_39._col0(Inner),Output:["_col0","_col1"]
                                   <-Map 11 [SIMPLE_EDGE]
                                     SHUFFLE [RS_38]
                                       PartitionCols:_col2
                                       Select Operator [SEL_28] (rows=144002668 width=135)
                                         Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_83] (rows=144002668 width=135)
+                                        Filter Operator [FIL_82] (rows=144002668 width=135)
                                           predicate:(ws_ship_hdemo_sk is not null and ws_sold_time_sk is not null and ws_web_page_sk is not null)
                                           TableScan [TS_26] (rows=144002668 width=135)
                                             default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_time_sk","ws_ship_hdemo_sk","ws_web_page_sk"]
@@ -85,54 +85,54 @@ Stage-0
                                       PartitionCols:_col0
                                       Select Operator [SEL_31] (rows=2301 width=585)
                                         Output:["_col0"]
-                                        Filter Operator [FIL_84] (rows=2301 width=585)
+                                        Filter Operator [FIL_83] (rows=2301 width=585)
                                           predicate:(wp_char_count BETWEEN 5000 AND 5200 and wp_web_page_sk is not null)
                                           TableScan [TS_29] (rows=4602 width=585)
                                             default@web_page,web_page,Tbl:COMPLETE,Col:NONE,Output:["wp_web_page_sk","wp_char_count"]
                 <-Reducer 5 [SIMPLE_EDGE]
-                  SHUFFLE [RS_53]
+                  SHUFFLE [RS_52]
                     Group By Operator [GBY_24] (rows=1 width=8)
                       Output:["_col0"],aggregations:["count(VALUE._col0)"]
                     <-Reducer 4 [SIMPLE_EDGE]
                       SHUFFLE [RS_23]
                         Group By Operator [GBY_22] (rows=1 width=8)
                           Output:["_col0"],aggregations:["count()"]
-                          Merge Join Operator [MERGEJOIN_89] (rows=191667562 width=135)
+                          Merge Join Operator [MERGEJOIN_88] (rows=191667562 width=135)
                             Conds:RS_18._col1=RS_19._col0(Inner)
                           <-Map 10 [SIMPLE_EDGE]
                             SHUFFLE [RS_19]
                               PartitionCols:_col0
                               Select Operator [SEL_11] (rows=3600 width=107)
                                 Output:["_col0"]
-                                Filter Operator [FIL_82] (rows=3600 width=107)
+                                Filter Operator [FIL_81] (rows=3600 width=107)
                                   predicate:((hd_dep_count = 8) and hd_demo_sk is not null)
                                   TableScan [TS_9] (rows=7200 width=107)
                                     default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_dep_count"]
                           <-Reducer 3 [SIMPLE_EDGE]
                             SHUFFLE [RS_18]
                               PartitionCols:_col1
-                              Merge Join Operator [MERGEJOIN_88] (rows=174243235 width=135)
+                              Merge Join Operator [MERGEJOIN_87] (rows=174243235 width=135)
                                 Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col1"]
                               <-Map 9 [SIMPLE_EDGE]
                                 SHUFFLE [RS_16]
                                   PartitionCols:_col0
                                   Select Operator [SEL_8] (rows=43200 width=471)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_81] (rows=43200 width=471)
+                                    Filter Operator [FIL_80] (rows=43200 width=471)
                                       predicate:(t_hour BETWEEN 6 AND 7 and t_time_sk is not null)
                                       TableScan [TS_6] (rows=86400 width=471)
                                         default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour"]
                               <-Reducer 2 [SIMPLE_EDGE]
                                 SHUFFLE [RS_15]
                                   PartitionCols:_col0
-                                  Merge Join Operator [MERGEJOIN_87] (rows=158402938 width=135)
+                                  Merge Join Operator [MERGEJOIN_86] (rows=158402938 width=135)
                                     Conds:RS_12._col2=RS_13._col0(Inner),Output:["_col0","_col1"]
                                   <-Map 1 [SIMPLE_EDGE]
                                     SHUFFLE [RS_12]
                                       PartitionCols:_col2
                                       Select Operator [SEL_2] (rows=144002668 width=135)
                                         Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_79] (rows=144002668 width=135)
+                                        Filter Operator [FIL_78] (rows=144002668 width=135)
                                           predicate:(ws_ship_hdemo_sk is not null and ws_sold_time_sk is not null and ws_web_page_sk is not null)
                                           TableScan [TS_0] (rows=144002668 width=135)
                                             default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_time_sk","ws_ship_hdemo_sk","ws_web_page_sk"]
@@ -141,7 +141,7 @@ Stage-0
                                       PartitionCols:_col0
                                       Select Operator [SEL_5] (rows=2301 width=585)
                                         Output:["_col0"]
-                                        Filter Operator [FIL_80] (rows=2301 width=585)
+                                        Filter Operator [FIL_79] (rows=2301 width=585)
                                           predicate:(wp_char_count BETWEEN 5000 AND 5200 and wp_web_page_sk is not null)
                                           TableScan [TS_3] (rows=4602 width=585)
                                             default@web_page,web_page,Tbl:COMPLETE,Col:NONE,Output:["wp_web_page_sk","wp_char_count"]

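The only differences in the Tez plan above are the numeric suffixes on the operator tags (MERGEJOIN_89 becomes MERGEJOIN_88, FIL_82 becomes FIL_81, and so on); the predicates, row estimates, and overall plan shape are identical. Those suffixes are drawn from an operator counter during plan compilation, so they are not stable identifiers: any change in which rewrites run before an operator is created shifts every later suffix. A minimal sketch for inspecting such compiler-assigned tags, assuming a small table t(a int) already exists:

  -- Hypothetical table t; whether operators appear with FIL_n/SEL_n
  -- style tags depends on the explain mode, and the numbers themselves
  -- can differ across Hive versions and optimizer settings.
  EXPLAIN
  SELECT a FROM t WHERE a = 1;
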
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/pointlookup2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup2.q.out b/ql/src/test/results/clientpositive/pointlookup2.q.out
index d0ad68a..5facf9a 100644
--- a/ql/src/test/results/clientpositive/pointlookup2.q.out
+++ b/ql/src/test/results/clientpositive/pointlookup2.q.out
@@ -379,23 +379,27 @@ STAGE PLANS:
             1 _col0 (type: int)
           outputColumnNames: _col0, _col1, _col3, _col4
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+          Select Operator
+            expressions: _col0 (type: int), _col1 (type: string), '2000-04-08' (type: string), _col3 (type: int), _col4 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col3,_col4
-                  columns.types int,string,int,string
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3,_col4
+                    columns.types int,string,string,int,string
+                    escape.delim \
+                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -408,7 +412,7 @@ STAGE PLANS:
               sort order: +
               Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
               tag: -1
-              value expressions: _col1 (type: string), _col3 (type: int), _col4 (type: string)
+              value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: int), _col4 (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -419,8 +423,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col0,_col1,_col3,_col4
-              columns.types int,string,int,string
+              columns _col0,_col1,_col2,_col3,_col4
+              columns.types int,string,string,int,string
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -428,8 +432,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col0,_col1,_col3,_col4
-                columns.types int,string,int,string
+                columns _col0,_col1,_col2,_col3,_col4
+                columns.types int,string,string,int,string
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -438,7 +442,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), '2000-04-08' (type: string)
+          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), VALUE._col1 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -639,23 +643,27 @@ STAGE PLANS:
             1 _col0 (type: int)
           outputColumnNames: _col0, _col1, _col3, _col4
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+          Select Operator
+            expressions: _col0 (type: int), _col1 (type: string), '2000-04-08' (type: string), _col3 (type: int), _col4 (type: string), '2000-04-09' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col3,_col4
-                  columns.types int,string,int,string
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3,_col4,_col5
+                    columns.types int,string,string,int,string,string
+                    escape.delim \
+                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -668,7 +676,7 @@ STAGE PLANS:
               sort order: +
               Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
               tag: -1
-              value expressions: _col1 (type: string), _col3 (type: int), _col4 (type: string)
+              value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: int), _col4 (type: string), _col5 (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -679,8 +687,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col0,_col1,_col3,_col4
-              columns.types int,string,int,string
+              columns _col0,_col1,_col2,_col3,_col4,_col5
+              columns.types int,string,string,int,string,string
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -688,8 +696,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col0,_col1,_col3,_col4
-                columns.types int,string,int,string
+                columns _col0,_col1,_col2,_col3,_col4,_col5
+                columns.types int,string,string,int,string,string
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -698,7 +706,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), '2000-04-09' (type: string)
+          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: int), VALUE._col3 (type: string), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -730,7 +738,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain extended
 select *
 from pcr_t1 t1 join pcr_t2 t2
@@ -1032,7 +1040,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain extended
 select *
 from pcr_t1 t1 join pcr_t2 t2

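In the two pointlookup2 plans above, the date literal is no longer folded into the reducer's final Select. The join now emits it as a real output column, it is shipped through the shuffle as a value expression, and a new Select Operator above the join restores the full column list before the file sink; the serialized schema grows to match (columns.types int,string,string,int,string). The same rewrite recurs in pointlookup3.q.out below. A hedged sketch of the query shape involved, assuming pcr_t1 and pcr_t2 carry a string partition column ds:

  -- Illustrative only; table schemas are assumed. With ds pinned to a
  -- single value, the planner can either fold the literal into every
  -- consumer or carry ds as a projected column; the result set is the
  -- same either way.
  SELECT t1.*, t2.*
  FROM pcr_t1 t1 JOIN pcr_t2 t2 ON t1.key = t2.key
  WHERE t1.ds = '2000-04-08';
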
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/pointlookup3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup3.q.out b/ql/src/test/results/clientpositive/pointlookup3.q.out
index 39804cf..def1bc8 100644
--- a/ql/src/test/results/clientpositive/pointlookup3.q.out
+++ b/ql/src/test/results/clientpositive/pointlookup3.q.out
@@ -305,7 +305,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), '2001-04-08' (type: string)
+          expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
           Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -461,23 +461,27 @@ STAGE PLANS:
             1 _col0 (type: int)
           outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col6
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+          Select Operator
+            expressions: _col0 (type: int), _col1 (type: string), '2000-04-08' (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: string), _col6 (type: string), '2001-04-08' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col3,_col4,_col5,_col6
-                  columns.types int,string,string,int,string,string
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+                    columns.types int,string,string,string,int,string,string,string
+                    escape.delim \
+                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -490,7 +494,7 @@ STAGE PLANS:
               sort order: +
               Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
               tag: -1
-              value expressions: _col1 (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: string), _col6 (type: string)
+              value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: string), _col6 (type: string), _col7 (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -501,8 +505,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col0,_col1,_col3,_col4,_col5,_col6
-              columns.types int,string,string,int,string,string
+              columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+              columns.types int,string,string,string,int,string,string,string
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -510,8 +514,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col0,_col1,_col3,_col4,_col5,_col6
-                columns.types int,string,string,int,string,string
+                columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+                columns.types int,string,string,string,int,string,string,string
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -520,7 +524,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: string), '2001-04-08' (type: string)
+          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: string), VALUE._col6 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -723,23 +727,27 @@ STAGE PLANS:
             1 _col0 (type: int)
           outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col7
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+          Select Operator
+            expressions: _col0 (type: int), _col1 (type: string), '2000-04-08' (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: string), '2000-04-09' (type: string), _col7 (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
+            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col3,_col4,_col5,_col7
-                  columns.types int,string,string,int,string,string
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  properties:
+                    columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+                    columns.types int,string,string,string,int,string,string,string
+                    escape.delim \
+                    serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
 
   Stage: Stage-2
     Map Reduce
@@ -752,7 +760,7 @@ STAGE PLANS:
               sort order: +
               Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
               tag: -1
-              value expressions: _col1 (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: string), _col7 (type: string)
+              value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: int), _col5 (type: string), _col6 (type: string), _col7 (type: string)
               auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
@@ -763,8 +771,8 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.SequenceFileInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
             properties:
-              columns _col0,_col1,_col3,_col4,_col5,_col7
-              columns.types int,string,string,int,string,string
+              columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+              columns.types int,string,string,string,int,string,string,string
               escape.delim \
               serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -772,8 +780,8 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.SequenceFileInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
               properties:
-                columns _col0,_col1,_col3,_col4,_col5,_col7
-                columns.types int,string,string,int,string,string
+                columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7
+                columns.types int,string,string,string,int,string,string,string
                 escape.delim \
                 serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
               serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
@@ -782,7 +790,7 @@ STAGE PLANS:
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), '2000-04-08' (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), '2000-04-09' (type: string), VALUE._col6 (type: string)
+          expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: string), VALUE._col6 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
           Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
@@ -814,7 +822,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain extended
 select *
 from pcr_t1 t1 join pcr_t1 t2

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_constant_expr.q.out b/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
index 17e2bab..21ea071 100644
--- a/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
+++ b/ql/src/test/results/clientpositive/ppd_constant_expr.q.out
@@ -106,9 +106,9 @@ INSERT OVERWRITE TABLE ppd_constant_expr SELECT 4 + NULL, src1.key - NULL, NULL
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
 POSTHOOK: Output: default@ppd_constant_expr
-POSTHOOK: Lineage: ppd_constant_expr.c1 EXPRESSION []
+POSTHOOK: Lineage: ppd_constant_expr.c1 SIMPLE []
 POSTHOOK: Lineage: ppd_constant_expr.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ppd_constant_expr.c3 EXPRESSION []
+POSTHOOK: Lineage: ppd_constant_expr.c3 SIMPLE []
 PREHOOK: query: SELECT ppd_constant_expr.* FROM ppd_constant_expr
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ppd_constant_expr
@@ -242,9 +242,9 @@ INSERT OVERWRITE TABLE ppd_constant_expr SELECT 4 + NULL, src1.key - NULL, NULL
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src1
 POSTHOOK: Output: default@ppd_constant_expr
-POSTHOOK: Lineage: ppd_constant_expr.c1 EXPRESSION []
+POSTHOOK: Lineage: ppd_constant_expr.c1 SIMPLE []
 POSTHOOK: Lineage: ppd_constant_expr.c2 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: ppd_constant_expr.c3 EXPRESSION []
+POSTHOOK: Lineage: ppd_constant_expr.c3 SIMPLE []
 PREHOOK: query: SELECT ppd_constant_expr.* FROM ppd_constant_expr
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ppd_constant_expr

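The lineage change above is a direct consequence of constant folding: in INSERT OVERWRITE TABLE ppd_constant_expr SELECT 4 + NULL, src1.key - NULL, NULL ..., the first and third projections are compile-time constants (4 + NULL and the bare NULL both evaluate to NULL), so c1 and c3 are recorded as SIMPLE lineage with no base columns, while c2 still reads src1.key and stays an EXPRESSION. A small check of the folding itself, assuming the standard src1 test table:

  -- 4 + NULL folds to NULL and the bare NULL is already constant;
  -- only key - NULL actually references a source column.
  SELECT 4 + NULL, key - NULL, NULL FROM src1 LIMIT 1;
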
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/ppd_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join5.q.out b/ql/src/test/results/clientpositive/ppd_join5.q.out
index f464c17..44607a0 100644
--- a/ql/src/test/results/clientpositive/ppd_join5.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join5.q.out
@@ -32,7 +32,7 @@ POSTHOOK: Lineage: t1.id1 SIMPLE []
 POSTHOOK: Lineage: t1.id2 SIMPLE []
 POSTHOOK: Lineage: t2.d SIMPLE []
 POSTHOOK: Lineage: t2.id SIMPLE []
-Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain
 select a.*,b.d d1,c.d d2 from
   t1 a join t2 b on (a.id1 = b.id)
@@ -148,7 +148,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: explain
 select * from (
 select a.*,b.d d1,c.d d2 from
@@ -271,7 +271,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select * from (
 select a.*,b.d d1,c.d d2 from
   t1 a join t2 b on (a.id1 = b.id)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
index ba5d187..ea0772a 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
@@ -38,33 +38,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
+              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string)
           TableScan
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
+              predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: key (type: string)
+                outputColumnNames: _col0
                 Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
           TableScan
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -85,7 +85,7 @@ STAGE PLANS:
         Join Operator
           condition map:
                Inner Join 0 to 1
-               Inner Join 1 to 2
+               Inner Join 0 to 2
           keys:
             0 _col0 (type: string)
             1 _col0 (type: string)
@@ -93,7 +93,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col0 (type: string)
+            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string), _col2 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -402,33 +402,33 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
+              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+                expressions: key (type: string), value (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string)
           TableScan
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
+              predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+                expressions: key (type: string)
+                outputColumnNames: _col0
                 Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
           TableScan
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -449,7 +449,7 @@ STAGE PLANS:
         Join Operator
           condition map:
                Inner Join 0 to 1
-               Inner Join 1 to 2
+               Inner Join 0 to 2
           keys:
             0 _col0 (type: string)
             1 _col0 (type: string)
@@ -457,7 +457,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col0 (type: string)
+            expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string), _col2 (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

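In the ppd_outer_join4 plans above, the two filtered scans of a swap roles: the branch that projects both key and value now feeds the first join input, so the condition map reads Inner Join 0 to 2 instead of Inner Join 1 to 2, and the final Select re-maps the _colN positions so the emitted column order is unchanged. A generic sketch of this shape, assuming the usual src(key string, value string) test table:

  -- Sketch only; which filtered scan feeds which join input is an
  -- optimizer choice, and the trailing Select keeps the output column
  -- order stable regardless of that choice.
  SELECT a.key, a.value, b.key, b.value, c.key
  FROM src a JOIN src b ON a.key = b.key
             JOIN src c ON a.key = c.key
  WHERE a.key > '10' AND a.key < '20' AND sqrt(a.key) <> 13.0;
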
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/ppd_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join5.q.out b/ql/src/test/results/clientpositive/ppd_outer_join5.q.out
index 65ca9d1..643a589 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join5.q.out
@@ -30,7 +30,7 @@ POSTHOOK: query: create table t4 (id int, key string, value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t4
-Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from t1 full outer join t2 on t1.id=t2.id join t3 on t2.id=t3.id where t3.id=20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from t1 full outer join t2 on t1.id=t2.id join t3 on t2.id=t3.id where t3.id=20
@@ -118,7 +118,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t2.id=t3.id) where t2.id=20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t2.id=t3.id) where t2.id=20
@@ -202,7 +202,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t1.id=t3.id) where t2.id=20
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from t1 join t2 on (t1.id=t2.id) left outer join t3 on (t1.id=t3.id) where t2.id=20

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out
index 3dbd258..a50e10c 100644
--- a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out
+++ b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out
@@ -263,7 +263,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: -- Q4: here, the filter c.bar should be created under the first join but above the second
 explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes c left outer join pokes b on c.foo=b.foo) c left outer join pokes d where d.foo=1 and c.bar=2
 PREHOOK: type: QUERY

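Across ppd_join5, ppd_outer_join5, and ppd_repeated_alias above, the warning text changes only in the bracketed ID (JOIN[15] becomes JOIN[14], JOIN[16] becomes JOIN[15], and so on); the cross-product detection itself is untouched. Like the FIL_n and MERGEJOIN_n suffixes earlier, the bracketed number is a compile-time operator ID, not a count of joins. A minimal sketch that should provoke the same class of warning, assuming two small tables t1 and t2 exist:

  -- Hypothetical tables; a join with no join condition is planned as a
  -- shuffle-join cross product, which Hive reports with a warning of
  -- this form.
  SELECT * FROM t1 CROSS JOIN t2;
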

[40/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.8.out b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.8.out
deleted file mode 100644
index 1bfdba2..0000000
--- a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.java1.8.out
+++ /dev/null
@@ -1,890 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_4
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_4
-RUN: Stage-0:DDL
-PREHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_5
-POSTHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_5
-RUN: Stage-0:DDL
-Warning: Shuffle Join JOIN[31][tables = [sq_2_notin_nullcheck]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-2 is a root stage
-  Stage-1 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-2
-    Spark
-      Edges:
-        Reducer 2 <- Map 10 (PARTITION-LEVEL SORT, 1), Reducer 9 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Map 7 (PARTITION-LEVEL SORT, 2), Reducer 2 (PARTITION-LEVEL SORT, 2)
-        Reducer 5 <- Map 11 (PARTITION-LEVEL SORT, 2), Map 6 (PARTITION-LEVEL SORT, 2)
-        Reducer 9 <- Map 8 (GROUP, 1)
-        Reducer 4 <- Reducer 3 (SORT, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 10 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: key (type: string), value (type: string)
-        Map 11 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: key (type: string), value (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: key (type: string), value (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-        Map 6 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key > '9') and value is not null) (type: boolean)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: string)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-        Map 7 
-            Map Operator Tree:
-                TableScan
-                  alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key > '2') (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-        Map 8 
-            Map Operator Tree:
-                TableScan
-                  alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key > '2') and key is null) (type: boolean)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
-        Reducer 2 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Left Semi Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: string)
-        Reducer 3 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Left Outer Join0 to 1
-                keys:
-                  0 _col0 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col5
-                Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: _col5 is null (type: boolean)
-                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
-        Reducer 4 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.src_5
-        Reducer 5 
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Left Semi Join 0 to 1
-                keys:
-                  0 key (type: string), value (type: string)
-                  1 _col0 (type: string), _col1 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.src_4
-        Reducer 9 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (_col0 = 0) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: 0 (type: bigint)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: bigint)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_5
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_4
-
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-Warning: Shuffle Join JOIN[31][tables = [sq_2_notin_nullcheck]] in Work 'Reducer 2' is a cross product
-PREHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_4
-PREHOOK: Output: default@src_5
-POSTHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_4
-POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value EXPRESSION [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value EXPRESSION [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-RUN: Stage-2:MAPRED
-RUN: Stage-1:MOVE
-RUN: Stage-0:MOVE
-RUN: Stage-3:STATS
-RUN: Stage-4:STATS
-PREHOOK: query: select * from src_4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_4
-#### A masked pattern was here ####
-90	val_90
-90	val_90
-90	val_90
-92	val_92
-95	val_95
-95	val_95
-96	val_96
-97	val_97
-97	val_97
-98	val_98
-98	val_98
-PREHOOK: query: select * from src_5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_5
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_5
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-105	val_105
-11	val_11
-111	val_111
-113	val_113
-113	val_113
-114	val_114
-116	val_116
-118	val_118
-118	val_118
-119	val_119
-119	val_119
-119	val_119
-12	val_12
-12	val_12
-120	val_120
-120	val_120
-125	val_125
-125	val_125
-126	val_126
-128	val_128
-128	val_128
-128	val_128
-129	val_129
-129	val_129
-131	val_131
-133	val_133
-134	val_134
-134	val_134
-136	val_136
-137	val_137
-137	val_137
-138	val_138
-138	val_138
-138	val_138
-138	val_138
-143	val_143
-145	val_145
-146	val_146
-146	val_146
-149	val_149
-149	val_149
-15	val_15
-15	val_15
-150	val_150
-152	val_152
-152	val_152
-153	val_153
-155	val_155
-156	val_156
-157	val_157
-158	val_158
-160	val_160
-162	val_162
-163	val_163
-164	val_164
-164	val_164
-165	val_165
-165	val_165
-166	val_166
-167	val_167
-167	val_167
-167	val_167
-168	val_168
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-17	val_17
-170	val_170
-172	val_172
-172	val_172
-174	val_174
-174	val_174
-175	val_175
-175	val_175
-176	val_176
-176	val_176
-177	val_177
-178	val_178
-179	val_179
-179	val_179
-18	val_18
-18	val_18
-180	val_180
-181	val_181
-183	val_183
-186	val_186
-187	val_187
-187	val_187
-187	val_187
-189	val_189
-19	val_19
-190	val_190
-191	val_191
-191	val_191
-192	val_192
-193	val_193
-193	val_193
-193	val_193
-194	val_194
-195	val_195
-195	val_195
-196	val_196
-197	val_197
-197	val_197
-199	val_199
-199	val_199
-199	val_199
-2	val_2
-Warning: Map Join MAPJOIN[46][bigTable=b] in task 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-5 is a root stage
-  Stage-2 depends on stages: Stage-5
-  Stage-1 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-5
-    Spark
-      Edges:
-        Reducer 6 <- Map 5 (GROUP, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key > '9') and value is not null) (type: boolean)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                        Spark HashTable Sink Operator
-                          keys:
-                            0 key (type: string), value (type: string)
-                            1 _col0 (type: string), _col1 (type: string)
-            Local Work:
-              Map Reduce Local Work
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key > '2') (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Spark HashTable Sink Operator
-                        keys:
-                          0 _col0 (type: string)
-                          1 _col0 (type: string)
-            Local Work:
-              Map Reduce Local Work
-        Map 5 
-            Map Operator Tree:
-                TableScan
-                  alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: ((key > '2') and key is null) (type: boolean)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
-        Reducer 6 
-            Local Work:
-              Map Reduce Local Work
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: count(VALUE._col0)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: (_col0 = 0) (type: boolean)
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: 0 (type: bigint)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: bigint)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      Spark HashTable Sink Operator
-                        keys:
-                          0 
-                          1 
-
-  Stage: Stage-2
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (SORT, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Map Join Operator
-                    condition map:
-                         Left Semi Join 0 to 1
-                    keys:
-                      0 
-                      1 
-                    outputColumnNames: _col0, _col1
-                    input vertices:
-                      1 Reducer 6
-                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                    Map Join Operator
-                      condition map:
-                           Left Outer Join0 to 1
-                      keys:
-                        0 _col0 (type: string)
-                        1 _col0 (type: string)
-                      outputColumnNames: _col0, _col1, _col5
-                      input vertices:
-                        1 Map 4
-                      Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                      Filter Operator
-                        predicate: _col5 is null (type: boolean)
-                        Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                        Select Operator
-                          expressions: _col0 (type: string), _col1 (type: string)
-                          outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            key expressions: _col0 (type: string)
-                            sort order: +
-                            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col1 (type: string)
-                  Map Join Operator
-                    condition map:
-                         Left Semi Join 0 to 1
-                    keys:
-                      0 key (type: string), value (type: string)
-                      1 _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col0, _col1
-                    input vertices:
-                      1 Map 3
-                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          name: default.src_4
-            Local Work:
-              Map Reduce Local Work
-        Reducer 2 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.src_5
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_5
-
-  Stage: Stage-3
-    Stats-Aggr Operator
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_4
-
-  Stage: Stage-4
-    Stats-Aggr Operator
-
-Warning: Map Join MAPJOIN[46][bigTable=b] in task 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_4
-PREHOOK: Output: default@src_5
-POSTHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_4
-POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value EXPRESSION [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value EXPRESSION [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-RUN: Stage-5:MAPRED
-RUN: Stage-2:MAPRED
-RUN: Stage-1:MOVE
-RUN: Stage-0:MOVE
-RUN: Stage-3:STATS
-RUN: Stage-4:STATS
-PREHOOK: query: select * from src_4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_4
-#### A masked pattern was here ####
-90	val_90
-90	val_90
-90	val_90
-92	val_92
-95	val_95
-95	val_95
-96	val_96
-97	val_97
-97	val_97
-98	val_98
-98	val_98
-PREHOOK: query: select * from src_5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_5
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_5
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-105	val_105
-11	val_11
-111	val_111
-113	val_113
-113	val_113
-114	val_114
-116	val_116
-118	val_118
-118	val_118
-119	val_119
-119	val_119
-119	val_119
-12	val_12
-12	val_12
-120	val_120
-120	val_120
-125	val_125
-125	val_125
-126	val_126
-128	val_128
-128	val_128
-128	val_128
-129	val_129
-129	val_129
-131	val_131
-133	val_133
-134	val_134
-134	val_134
-136	val_136
-137	val_137
-137	val_137
-138	val_138
-138	val_138
-138	val_138
-138	val_138
-143	val_143
-145	val_145
-146	val_146
-146	val_146
-149	val_149
-149	val_149
-15	val_15
-15	val_15
-150	val_150
-152	val_152
-152	val_152
-153	val_153
-155	val_155
-156	val_156
-157	val_157
-158	val_158
-160	val_160
-162	val_162
-163	val_163
-164	val_164
-164	val_164
-165	val_165
-165	val_165
-166	val_166
-167	val_167
-167	val_167
-167	val_167
-168	val_168
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-17	val_17
-170	val_170
-172	val_172
-172	val_172
-174	val_174
-174	val_174
-175	val_175
-175	val_175
-176	val_176
-176	val_176
-177	val_177
-178	val_178
-179	val_179
-179	val_179
-18	val_18
-18	val_18
-180	val_180
-181	val_181
-183	val_183
-186	val_186
-187	val_187
-187	val_187
-187	val_187
-189	val_189
-19	val_19
-190	val_190
-191	val_191
-191	val_191
-192	val_192
-193	val_193
-193	val_193
-193	val_193
-194	val_194
-195	val_195
-195	val_195
-196	val_196
-197	val_197
-197	val_197
-199	val_199
-199	val_199
-199	val_199
-2	val_2
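
For context: the removed golden output above exercises Hive's multi-insert form, in which a single FROM clause fans out into several INSERT OVERWRITE branches so the source table is scanned only once. A minimal sketch of the pattern, taken from the query shown in the deleted EXPLAIN output (it assumes the standard `src` test table with string columns key/value and the pre-created targets `src_4` and `src_5`):

    -- One scan of src feeds both branches; each branch filters independently.
    FROM src b
    INSERT OVERWRITE TABLE src_4
      SELECT *
      WHERE b.key IN (SELECT a.key FROM src a
                      WHERE b.value = a.value AND a.key > '9')
    INSERT OVERWRITE TABLE src_5
      SELECT *
      WHERE b.key NOT IN (SELECT key FROM src s1 WHERE s1.key > '2')
      ORDER BY key;

The ORDER BY belongs to the src_5 branch alone, which is why only that branch passes through the extra SORT reducer (Reducer 2) in the plan above while the src_4 branch writes directly from the map side.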

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
index 04dd9b4..d6df85a 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out
@@ -73,8 +73,8 @@ STAGE PLANS:
     Spark
       Edges:
         Reducer 2 <- Map 10 (PARTITION-LEVEL SORT, 1), Reducer 9 (PARTITION-LEVEL SORT, 1)
-        Reducer 3 <- Map 7 (PARTITION-LEVEL SORT, 1), Reducer 2 (PARTITION-LEVEL SORT, 1)
-        Reducer 5 <- Map 11 (PARTITION-LEVEL SORT, 1), Map 6 (PARTITION-LEVEL SORT, 1)
+        Reducer 3 <- Map 7 (PARTITION-LEVEL SORT, 4), Reducer 2 (PARTITION-LEVEL SORT, 4)
+        Reducer 5 <- Map 11 (PARTITION-LEVEL SORT, 4), Map 6 (PARTITION-LEVEL SORT, 4)
         Reducer 9 <- Map 8 (GROUP, 1)
         Reducer 4 <- Reducer 3 (SORT, 1)
 #### A masked pattern was here ####
@@ -105,21 +105,21 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((key > '9') and value is not null) (type: boolean)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         keys: _col0 (type: string), _col1 (type: string)
                         mode: hash
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: string), _col1 (type: string)
                           sort order: ++
                           Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
         Map 7 
             Map Operator Tree:
                 TableScan
@@ -235,19 +235,17 @@ STAGE PLANS:
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col0 = 0) (type: boolean)
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: 0 (type: bigint)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: bigint)
+                      keys: 0 (type: bigint)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-1
     Move Operator
@@ -308,10 +306,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@src_4
 POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value EXPRESSION [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value EXPRESSION [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
 RUN: Stage-2:MAPRED
 RUN: Stage-1:MOVE
 RUN: Stage-0:MOVE
@@ -514,16 +512,16 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((key > '9') and value is not null) (type: boolean)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         keys: _col0 (type: string), _col1 (type: string)
                         mode: hash
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         Spark HashTable Sink Operator
                           keys:
                             0 key (type: string), value (type: string)
@@ -578,16 +576,14 @@ STAGE PLANS:
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col0 = 0) (type: boolean)
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: 0 (type: bigint)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: bigint)
+                      keys: 0 (type: bigint)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Spark HashTable Sink Operator
                         keys:
                           0 
@@ -730,10 +726,10 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@src_4
 POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value EXPRESSION [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key EXPRESSION [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value EXPRESSION [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
 RUN: Stage-5:MAPRED
 RUN: Stage-2:MAPRED
 RUN: Stage-1:MOVE
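
The "cross product" warnings and the `(_col0 = 0)` filters seen in these plans are how Hive implements NOT IN with SQL null semantics: if the subquery produces even one NULL key, `key NOT IN (...)` must return no rows at all, so the planner counts the subquery's NULL keys (Map 5 / Reducer 6), gates the main scan on that count being zero via a keyless left semi join (the flagged cross product), and turns the NOT IN itself into a left outer join plus an IS NULL filter. In conceptual SQL terms (a sketch of the rewrite, not a form this Hive version necessarily accepts verbatim; the alias `sq` is mine), the src_5 branch behaves like:

    -- Anti-join form of: b.key NOT IN (SELECT key FROM src s1 WHERE s1.key > '2')
    SELECT b.key, b.value
    FROM src b
    LEFT OUTER JOIN (SELECT key FROM src s1 WHERE s1.key > '2') sq
      ON b.key = sq.key
    WHERE sq.key IS NULL;   -- keep only rows with no match; the null-count guard
                            -- above already ensured the subquery had no NULL keys
                            -- (exact only for non-NULL b.key: a NULL probe key
                            -- makes NOT IN unknown, which excludes the row)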

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
deleted file mode 100644
index 86b7544..0000000
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.7.out
+++ /dev/null
@@ -1,217 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (GROUP, 2)
-        Reducer 3 <- Reducer 2 (SORT, 1)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: i (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: avg(50), avg(50.0), avg(50)
-                      keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
-            Execution mode: vectorized
-        Reducer 2 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
-        Reducer 3 
-            Execution mode: vectorized
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Limit
-                  Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-65536	50.0	50.0	50.0000
-65537	50.0	50.0	50.0000
-65538	50.0	50.0	50.0000
-65539	50.0	50.0	50.0000
-65540	50.0	50.0	50.0000
-65541	50.0	50.0	50.0000
-65542	50.0	50.0	50.0000
-65543	50.0	50.0	50.0000
-65544	50.0	50.0	50.0000
-65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
deleted file mode 100644
index 69f4754..0000000
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.java1.8.out
+++ /dev/null
@@ -1,203 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (GROUP, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: i (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: avg(50), avg(50.0), avg(50)
-                      keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
-            Execution mode: vectorized
-        Reducer 2 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Limit
-                  Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-65636	50.0	50.0	50
-65550	50.0	50.0	50
-65592	50.0	50.0	50
-65744	50.0	50.0	50
-65722	50.0	50.0	50
-65668	50.0	50.0	50
-65598	50.0	50.0	50
-65596	50.0	50.0	50
-65568	50.0	50.0	50
-65738	50.0	50.0	50
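
The two vector_cast_constant golden files deleted above existed because GROUP BY ... LIMIT without ORDER BY returns rows in hash-iteration order, which differs between Java 7 and Java 8 (the java1.8 result block above shows that arbitrary order). With ORDER BY i in the query, as the java1.7 variant already had and as the unified vector_cast_constant.q.out below now adopts, the output is deterministic and a single golden file serves both JVMs:

    -- Deterministic: ORDER BY pins row order regardless of JVM hash ordering.
    SELECT i,
           AVG(CAST(50 AS INT))     AS avg_int_ok,
           AVG(CAST(50 AS DOUBLE))  AS avg_double_ok,
           AVG(CAST(50 AS DECIMAL)) AS avg_decimal_ok
    FROM over1korc
    GROUP BY i
    ORDER BY i
    LIMIT 10;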

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
index 63cdc24..0459d93 100644
--- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out
@@ -102,14 +102,14 @@ PREHOOK: query: EXPLAIN SELECT
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT 
   i,
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
@@ -119,7 +119,8 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP, 2)
+        Reducer 2 <- Map 1 (GROUP, 4)
+        Reducer 3 <- Reducer 2 (SORT, 1)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -129,11 +130,11 @@ STAGE PLANS:
                   Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: i (type: int)
-                    outputColumnNames: i
+                    outputColumnNames: _col0
                     Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: avg(50), avg(UDFToDouble(50)), avg(CAST( 50 AS decimal(10,0)))
-                      keys: i (type: int)
+                      aggregations: avg(50), avg(50.0), avg(50)
+                      keys: _col0 (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
@@ -152,6 +153,19 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
+        Reducer 3 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
                 Limit
                   Number of rows: 10
                   Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
@@ -159,8 +173,8 @@ STAGE PLANS:
                     compressed: false
                     Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
                     table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
@@ -174,7 +188,7 @@ PREHOOK: query: SELECT
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 PREHOOK: type: QUERY
 PREHOOK: Input: default@over1korc
 #### A masked pattern was here ####
@@ -183,17 +197,17 @@ POSTHOOK: query: SELECT
   AVG(CAST(50 AS INT)) AS `avg_int_ok`,
   AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
   AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i LIMIT 10
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@over1korc
 #### A masked pattern was here ####
-65598	50.0	50.0	50
-65694	50.0	50.0	50
-65678	50.0	50.0	50
-65684	50.0	50.0	50
-65596	50.0	50.0	50
-65692	50.0	50.0	50
-65630	50.0	50.0	50
-65674	50.0	50.0	50
-65628	50.0	50.0	50
-65776	50.0	50.0	50
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000
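
Beyond the ORDER BY change, the updated plan above reflects constant handling after CBO: the casts are folded before the aggregate (avg(50.0) rather than avg(UDFToDouble(50))), and the decimal average is typed as decimal(14,4), AVG adding four digits of scale to its decimal(10,0) input, so the constant result now prints as 50.0000 instead of 50. A one-line way to see the widening at work (assuming the over1korc table from this test):

    -- CAST(50 AS DECIMAL) is decimal(10,0); AVG yields decimal(14,4) -> 50.0000
    SELECT AVG(CAST(50 AS DECIMAL)) FROM over1korc;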

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
deleted file mode 100644
index a4908bc..0000000
--- a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
+++ /dev/null
@@ -1,191 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-drop table stats_list_bucket
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-drop table stats_list_bucket
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table stats_list_bucket_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table stats_list_bucket_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table stats_list_bucket (
-  c1 string,
-  c2 string
-) partitioned by (ds string, hr string)
-skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
-stored as directories
-stored as rcfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_list_bucket
-POSTHOOK: query: create table stats_list_bucket (
-  c1 string,
-  c2 string
-) partitioned by (ds string, hr string)
-skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
-stored as directories
-stored as rcfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_list_bucket
-PREHOOK: query: -- Try partitioned table with list bucketing.
--- The stats should show 500 rows loaded, as many rows as the src table has.
-
-insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
-  select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
-POSTHOOK: query: -- Try partitioned table with list bucketing.
--- The stats should show 500 rows loaded, as many rows as the src table has.
-
-insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
-  select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_list_bucket
-POSTHOOK: query: desc formatted stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_list_bucket
-# col_name            	data_type           	comment             
-	 	 
-c1                  	string              	                    
-c2                  	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	stats_list_bucket   	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5522                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[c1, c2]            	 
-Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[82, val_82]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=82/c2=val_82, [466, val_466]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=466/c2=val_466, [287, val_287]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=287/c2=val_287}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Also try non-partitioned table with list bucketing.
--- Stats should show the same number of rows.
-
-create table stats_list_bucket_1 (
-  c1 string,
-  c2 string
-)
-skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
-stored as directories
-stored as rcfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_list_bucket_1
-POSTHOOK: query: -- Also try non-partitioned table with list bucketing.
--- Stats should show the same number of rows.
-
-create table stats_list_bucket_1 (
-  c1 string,
-  c2 string
-)
-skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
-stored as directories
-stored as rcfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_list_bucket_1
-PREHOOK: query: insert overwrite table stats_list_bucket_1
-  select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_list_bucket_1
-POSTHOOK: query: insert overwrite table stats_list_bucket_1
-  select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_list_bucket_1
-POSTHOOK: Lineage: stats_list_bucket_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_list_bucket_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted stats_list_bucket_1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_list_bucket_1
-POSTHOOK: query: desc formatted stats_list_bucket_1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_list_bucket_1
-# col_name            	data_type           	comment             
-	 	 
-c1                  	string              	                    
-c2                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5522                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[c1, c2]            	 
-Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[82, val_82]=/stats_list_bucket_1/c1=82/c2=val_82, [466, val_466]=/stats_list_bucket_1/c1=466/c2=val_466, [287, val_287]=/stats_list_bucket_1/c1=287/c2=val_287}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table stats_list_bucket
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_list_bucket
-PREHOOK: Output: default@stats_list_bucket
-POSTHOOK: query: drop table stats_list_bucket
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_list_bucket
-POSTHOOK: Output: default@stats_list_bucket
-PREHOOK: query: drop table stats_list_bucket_1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_list_bucket_1
-PREHOOK: Output: default@stats_list_bucket_1
-POSTHOOK: query: drop table stats_list_bucket_1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_list_bucket_1
-POSTHOOK: Output: default@stats_list_bucket_1
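
As with vector_cast_constant, the per-JDK stats_list_bucket golden files go away: the java1.7 variant above and the java1.8 variant removed next differ only superficially, in the encoding of COLUMN_STATS_ACCURATE (the JSON `{"BASIC_STATS":"true"}` versus a bare `true`), the map-iteration order of the "Skewed Value to Truncated Path" entries, and a leading comment. The test itself exercises list bucketing, where the listed skew values are stored in dedicated subdirectories; the DDL, as it appears in the deleted output:

    -- List bucketing: skewed (c1, c2) values get their own subdirectories.
    CREATE TABLE stats_list_bucket (c1 string, c2 string)
    PARTITIONED BY (ds string, hr string)
    SKEWED BY (c1, c2) ON (('466','val_466'), ('287','val_287'), ('82','val_82'))
    STORED AS DIRECTORIES
    STORED AS RCFILE;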

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
deleted file mode 100644
index 8688cee..0000000
--- a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
+++ /dev/null
@@ -1,193 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-drop table stats_list_bucket
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-drop table stats_list_bucket
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: drop table stats_list_bucket_1
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table stats_list_bucket_1
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table stats_list_bucket (
-  c1 string,
-  c2 string
-) partitioned by (ds string, hr string)
-skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
-stored as directories
-stored as rcfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_list_bucket
-POSTHOOK: query: create table stats_list_bucket (
-  c1 string,
-  c2 string
-) partitioned by (ds string, hr string)
-skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
-stored as directories
-stored as rcfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_list_bucket
-PREHOOK: query: -- Make sure we use hashed IDs during stats publishing.
--- Try partitioned table with list bucketing.
--- The stats should show 500 rows loaded, as many rows as the src table has.
-
-insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
-  select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
-POSTHOOK: query: -- Make sure we use hashed IDs during stats publishing.
--- Try partitioned table with list bucketing.
--- The stats should show 500 rows loaded, as many rows as the src table has.
-
-insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
-  select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_list_bucket
-POSTHOOK: query: desc formatted stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_list_bucket
-# col_name            	data_type           	comment             
-	 	 
-c1                  	string              	                    
-c2                  	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	stats_list_bucket   	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5522                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[c1, c2]            	 
-Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[466, val_466]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=466/c2=val_466, [287, val_287]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=287/c2=val_287, [82, val_82]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=82/c2=val_82}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: -- Also try non-partitioned table with list bucketing.
--- Stats should show the same number of rows.
-
-create table stats_list_bucket_1 (
-  c1 string,
-  c2 string
-)
-skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
-stored as directories
-stored as rcfile
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats_list_bucket_1
-POSTHOOK: query: -- Also try non-partitioned table with list bucketing.
--- Stats should show the same number of rows.
-
-create table stats_list_bucket_1 (
-  c1 string,
-  c2 string
-)
-skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
-stored as directories
-stored as rcfile
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats_list_bucket_1
-PREHOOK: query: insert overwrite table stats_list_bucket_1
-  select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@stats_list_bucket_1
-POSTHOOK: query: insert overwrite table stats_list_bucket_1
-  select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@stats_list_bucket_1
-POSTHOOK: Lineage: stats_list_bucket_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: stats_list_bucket_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: desc formatted stats_list_bucket_1
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats_list_bucket_1
-POSTHOOK: query: desc formatted stats_list_bucket_1
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats_list_bucket_1
-# col_name            	data_type           	comment             
-	 	 
-c1                  	string              	                    
-c2                  	string              	                    
-	 	 
-# Detailed Table Information	 	 
-Database:           	default             	 
-#### A masked pattern was here ####
-Retention:          	0                   	 
-#### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
-Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5522                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[c1, c2]            	 
-Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[466, val_466]=/stats_list_bucket_1/c1=466/c2=val_466, [82, val_82]=/stats_list_bucket_1/c1=82/c2=val_82, [287, val_287]=/stats_list_bucket_1/c1=287/c2=val_287}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: drop table stats_list_bucket
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_list_bucket
-PREHOOK: Output: default@stats_list_bucket
-POSTHOOK: query: drop table stats_list_bucket
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_list_bucket
-POSTHOOK: Output: default@stats_list_bucket
-PREHOOK: query: drop table stats_list_bucket_1
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats_list_bucket_1
-PREHOOK: Output: default@stats_list_bucket_1
-POSTHOOK: query: drop table stats_list_bucket_1
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats_list_bucket_1
-POSTHOOK: Output: default@stats_list_bucket_1


[21/66] [abbrv] hive git commit: HIVE-12467: Add number of dynamic partitions to error message (Lars Francke, reviewed by Prasanth Jayachandran)

Posted by sp...@apache.org.
HIVE-12467: Add number of dynamic partitions to error message (Lars Francke, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/115d225d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/115d225d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/115d225d

Branch: refs/heads/java8
Commit: 115d225d8d311e1d1fa42e8f103c214b26c209a2
Parents: 8a67958
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Tue May 24 12:27:09 2016 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Tue May 24 12:27:09 2016 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/115d225d/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 3ec63ee..cd5e7e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -938,7 +938,8 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           // we cannot proceed and need to tell the hive client that retries won't succeed either
           throw new HiveFatalException(
                ErrorMsg.DYNAMIC_PARTITIONS_TOO_MANY_PER_NODE_ERROR.getErrorCodedMsg()
-               + "Maximum was set to: " + maxPartitions);
+               + "Maximum was set to " + maxPartitions + " partitions per node"
+               + ", number of dynamic partitions on this node: " + valToPaths.size());
         }
 
         if (!conf.getDpSortState().equals(DPSortState.NONE) && prevFsp != null) {
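
The patch above only changes the text of the fatal error: besides the configured per-node limit, it now also reports how many dynamic partitions the node actually produced. A minimal, self-contained sketch of the resulting message shape; maxPartitions and valToPaths here are hypothetical stand-ins for the FileSinkOperator fields shown in the diff, not Hive code:

import java.util.HashMap;
import java.util.Map;

public class DynPartErrorMessageSketch {
  public static void main(String[] args) {
    // Hypothetical stand-ins for FileSinkOperator state: the configured
    // per-node limit, and the map whose size is the observed partition count.
    int maxPartitions = 100;
    Map<String, String> valToPaths = new HashMap<>();
    valToPaths.put("ds=2008-04-08/hr=11", "/warehouse/t/ds=2008-04-08/hr=11");
    valToPaths.put("ds=2008-04-08/hr=12", "/warehouse/t/ds=2008-04-08/hr=12");

    // Message shape after the patch: the limit and the actual count.
    String msg = "Maximum was set to " + maxPartitions + " partitions per node"
        + ", number of dynamic partitions on this node: " + valToPaths.size();
    System.out.println(msg);
  }
}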


[25/66] [abbrv] hive git commit: Preparing for 2.2.0 development

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/spark-client/pom.xml
----------------------------------------------------------------------
diff --git a/spark-client/pom.xml b/spark-client/pom.xml
index b677195..6cf3b17 100644
--- a/spark-client/pom.xml
+++ b/spark-client/pom.xml
@@ -22,14 +22,14 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
   </parent>
 
   <groupId>org.apache.hive</groupId>
   <artifactId>spark-client</artifactId>
   <packaging>jar</packaging>
   <name>Spark Remote Client</name>
-  <version>2.1.0-SNAPSHOT</version>
+  <version>2.2.0-SNAPSHOT</version>
 
   <properties>
     <hive.path.to.root>..</hive.path.to.root>

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/storage-api/pom.xml
----------------------------------------------------------------------
diff --git a/storage-api/pom.xml b/storage-api/pom.xml
index f5b326b..4095ed2 100644
--- a/storage-api/pom.xml
+++ b/storage-api/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e22e3ad3/testutils/pom.xml
----------------------------------------------------------------------
diff --git a/testutils/pom.xml b/testutils/pom.xml
index 4296275..a0ca8df 100644
--- a/testutils/pom.xml
+++ b/testutils/pom.xml
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>2.1.0-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 


[04/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
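One effect visible throughout the plan diffs below: with Hive's own ConstantPropagate pass disabled after CBO has optimized the plan, casts over literals arrive already folded, so the plans print the literal 2.0 where UDFToDouble(2) used to appear. A minimal, generic sketch of that folding idea in plain Java; toDouble is a hypothetical stand-in for UDFToDouble, and this is illustrative only, not Hive's optimizer code:

public class ConstantFoldSketch {
  // Hypothetical stand-in for a cast UDF such as UDFToDouble on an int.
  static double toDouble(int v) {
    return (double) v;
  }

  public static void main(String[] args) {
    // Unfolded form: the cast would be evaluated for every row processed.
    double unfolded = toDouble(2);
    // Folded form: the optimizer precomputes the cast once and plans with
    // the literal, which is what the diffs show as "2.0" replacing
    // "UDFToDouble(2)".
    double folded = 2.0;
    System.out.println(unfolded == folded);  // prints: true
  }
}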
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
index 2626768..aa35d2c 100644
--- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
@@ -1562,12 +1562,12 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
@@ -1588,7 +1588,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1667,9 +1667,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
@@ -1710,7 +1710,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0) (type: double)
                   1 _col0 (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -1808,9 +1808,9 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
@@ -1819,7 +1819,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1898,9 +1898,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
@@ -1926,7 +1926,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0) (type: double)
                   1 _col0 (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -2019,9 +2019,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      key expressions: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                       sort order: +
-                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
@@ -2062,7 +2062,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                  0 UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                   1 UDFToString(_col0) (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -2127,6 +2127,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -2151,42 +2152,40 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      sort order: 
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    keys: '2008-04-08' (type: string)
-                    mode: hash
-                    outputColumnNames: _col0
+                  Select Operator
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: '2008-04-08' (type: string)
-                      sort order: +
-                      Map-reduce partition columns: '2008-04-08' (type: string)
+                    Group By Operator
+                      keys: '2008-04-08' (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col0 (type: string)
-                  1 '2008-04-08' (type: string)
-                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+                  0 
+                  1 
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -2213,32 +2212,15 @@ STAGE PLANS:
         Reducer 5 
             Reduce Operator Tree:
               Group By Operator
-                keys: '2008-04-08' (type: string)
+                keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: '2008-04-08' (type: string)
-                    sort order: +
-                    Map-reduce partition columns: '2008-04-08' (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    sort order: 
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Dynamic Partitioning Event Operator
-                        Target column: ds (string)
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -2246,21 +2228,18 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
@@ -2276,7 +2255,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- non-equi join
 EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
@@ -2371,7 +2350,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -4534,7 +4513,7 @@ STAGE PLANS:
                            Inner Join 0 to 1
                       keys:
                         0 UDFToDouble(_col0) (type: double)
-                        1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                       input vertices:
                         1 Map 3
                       Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
@@ -4562,12 +4541,12 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
@@ -4652,7 +4631,7 @@ STAGE PLANS:
                       condition map:
                            Inner Join 0 to 1
                       keys:
-                        0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                        0 (UDFToDouble(_col0) * 2.0) (type: double)
                         1 _col0 (type: double)
                       input vertices:
                         1 Map 3
@@ -4753,6 +4732,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
+Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -4768,57 +4748,67 @@ STAGE PLANS:
     Tez
 #### A masked pattern was here ####
       Edges:
-        Map 1 <- Reducer 4 (BROADCAST_EDGE)
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 4 <- Map 3 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (BROADCAST_EDGE), Map 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Map Join Operator
-                      condition map:
-                           Inner Join 0 to 1
-                      keys:
-                        0 _col0 (type: string)
-                        1 '2008-04-08' (type: string)
-                      input vertices:
-                        1 Reducer 4
-                      Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-                      HybridGraceHashJoin: true
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
-        Map 3 
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+        Map 2 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    keys: '2008-04-08' (type: string)
-                    mode: hash
-                    outputColumnNames: _col0
+                  Select Operator
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: '2008-04-08' (type: string)
-                      sort order: +
-                      Map-reduce partition columns: '2008-04-08' (type: string)
+                    Group By Operator
+                      keys: '2008-04-08' (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-        Reducer 2 
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    keys:
+                      0 
+                      1 
+                    input vertices:
+                      0 Map 1
+                    Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+        Reducer 4 
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4832,35 +4822,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Reduce Operator Tree:
-              Group By Operator
-                keys: '2008-04-08' (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: '2008-04-08' (type: string)
-                    sort order: +
-                    Map-reduce partition columns: '2008-04-08' (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Dynamic Partitioning Event Operator
-                        Target column: ds (string)
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -4868,21 +4829,18 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
index 71b7ee3..08fc33e 100644
--- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
@@ -561,7 +561,7 @@ bar
 baz
 baz
 baz
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT agg.amount
 FROM agg_01 agg,
 dim_shops d1
@@ -634,7 +634,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT agg.amount
 FROM agg_01 agg,
 dim_shops d1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
index 2f88148..6689394 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
@@ -2409,29 +2409,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over1k
-                  Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 859 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (s = 'foo') (type: boolean)
-                    Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: si (type: smallint), b (type: bigint), f (type: float), t (type: tinyint), i (type: int)
-                      outputColumnNames: _col0, _col1, _col2, _col4, _col5
-                      Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+                      expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), t (type: tinyint), i (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                      Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'foo' (type: string), _col4 (type: tinyint), _col5 (type: int)
+                        key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: 'foo' (type: string), _col4 (type: tinyint), _col5 (type: int)
-                        Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+                        Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
+                        Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
-                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), 'foo' (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
+                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1974 Data size: 53304 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2485,19 +2485,19 @@ STAGE PLANS:
                     predicate: (t = 27) (type: boolean)
                     Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), i (type: int)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col5
+                      expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), 27 (type: tinyint), i (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                       Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col3 (type: string), 27 (type: tinyint), _col5 (type: int)
+                        key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: _col3 (type: string), 27 (type: tinyint), _col5 (type: int)
+                        Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
-                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), 27 (type: tinyint), KEY._col5 (type: int)
+                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -2556,19 +2556,19 @@ STAGE PLANS:
                     predicate: (i = 100) (type: boolean)
                     Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), t (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), t (type: tinyint), 100 (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                       Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col3 (type: string), _col4 (type: tinyint), 100 (type: int)
+                        key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), 100 (type: int)
+                        Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
-                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), 100 (type: int)
+                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 429 Data size: 53255 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -2627,19 +2627,19 @@ STAGE PLANS:
                     predicate: ((i = 100) and (t = 27)) (type: boolean)
                     Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string)
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      expressions: si (type: smallint), b (type: bigint), f (type: float), s (type: string), 27 (type: tinyint), 100 (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                       Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col3 (type: string), 27 (type: tinyint), 100 (type: int)
+                        key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: _col3 (type: string), 27 (type: tinyint), 100 (type: int)
+                        Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
-                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), 27 (type: tinyint), 100 (type: int)
+                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
@@ -2693,29 +2693,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over1k
-                  Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 859 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((i = 100) and (s = 'foo')) (type: boolean)
-                    Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: si (type: smallint), b (type: bigint), f (type: float), t (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2, _col4
-                      Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                      expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), t (type: tinyint), 100 (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                      Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'foo' (type: string), _col4 (type: tinyint), 100 (type: int)
+                        key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: 'foo' (type: string), _col4 (type: tinyint), 100 (type: int)
-                        Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                        Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
+                        Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
-                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), 'foo' (type: string), KEY._col4 (type: tinyint), 100 (type: int)
+                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2764,29 +2764,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over1k
-                  Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 859 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((t = 27) and (s = 'foo')) (type: boolean)
-                    Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: si (type: smallint), b (type: bigint), f (type: float), i (type: int)
-                      outputColumnNames: _col0, _col1, _col2, _col5
-                      Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                      expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27 (type: tinyint), i (type: int)
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                      Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'foo' (type: string), 27 (type: tinyint), _col5 (type: int)
+                        key expressions: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
                         sort order: +++
-                        Map-reduce partition columns: 'foo' (type: string), 27 (type: tinyint), _col5 (type: int)
-                        Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                        Map-reduce partition columns: _col3 (type: string), _col4 (type: tinyint), _col5 (type: int)
+                        Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: smallint), _col1 (type: bigint), _col2 (type: float)
         Reducer 2 
             Reduce Operator Tree:
               Select Operator
-                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), 'foo' (type: string), 27 (type: tinyint), KEY._col5 (type: int)
+                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: bigint), VALUE._col2 (type: float), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 987 Data size: 26652 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 214 Data size: 26565 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2832,17 +2832,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: over1k
-                  Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 859 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((i = 100) and (t = 27) and (s = 'foo')) (type: boolean)
-                    Statistics: Num rows: 493 Data size: 13312 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 107 Data size: 13282 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27 (type: tinyint), 100 (type: int)
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                      Statistics: Num rows: 493 Data size: 13312 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 107 Data size: 13282 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 493 Data size: 13312 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 107 Data size: 13282 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
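
In the dynpart_sort_optimization.q.out diffs above, literals such as 'foo', 27, and 100 are now projected by the Select Operator as ordinary output columns (_col3.._col5), and the Reduce Output Operator keys reference those columns instead of inlining the constants. A minimal, hypothetical sketch of the two key shapes, with rows modeled as plain Java Object arrays rather than Hive's internal row representation:

public class ConstantKeySketch {
  public static void main(String[] args) {
    // Row shape after the patch: the literals are emitted as normal output
    // columns, here _col0.._col5 = si, b, f, 'foo', 27, i.
    Object[] row = { (short) 1, 2L, 3.0f, "foo", (byte) 27, 100 };

    // Before: the reduce key re-inlined the constants directly.
    Object[] keyInlined = { "foo", (byte) 27, row[5] };

    // After: the reduce key references the projected columns _col3.._col5.
    Object[] keyByColumn = { row[3], row[4], row[5] };

    // Both shapes produce the same key values for every row.
    System.out.println(java.util.Arrays.equals(keyInlined, keyByColumn));  // true
  }
}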

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out
index 97f59d9..6931398 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out
@@ -1613,31 +1613,31 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col1, _col2
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col2)
-                      keys: 'day' (type: string), _col1 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'day' (type: string), _col1 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: 'day' (type: string), _col1 (type: string)
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col2 (type: bigint)
+                        value expressions: _col1 (type: bigint)
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                keys: 'day' (type: string), KEY._col1 (type: string)
+                keys: KEY._col0 (type: string)
                 mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), 'day' (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int), 'day' (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
@@ -1741,31 +1741,31 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col1, _col2
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col2)
-                      keys: 'day' (type: string), _col1 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: 'day' (type: string), _col1 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: 'day' (type: string), _col1 (type: string)
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col2 (type: bigint)
+                        value expressions: _col1 (type: bigint)
         Reducer 2 
             Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
-                keys: 'day' (type: string), KEY._col1 (type: string)
+                keys: KEY._col0 (type: string)
                 mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), 'day' (type: string)
+                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int), 'day' (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index 965577e..84f2044 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -1808,20 +1808,16 @@ Stage-0
           SHUFFLE [RS_12]
             Group By Operator [GBY_11] (rows=1 width=8)
               Output:["_col0"],aggregations:["count('2014')"]
-              Merge Join Operator [MERGEJOIN_20] (rows=400 width=0)
-                Conds:RS_6._col0=RS_7._col0(Inner)
+              Merge Join Operator [MERGEJOIN_18] (rows=400 width=0)
+                Conds:(Inner)
               <-Map 1 [SIMPLE_EDGE]
                 SHUFFLE [RS_6]
-                  PartitionCols:_col0
-                  Select Operator [SEL_2] (rows=20 width=184)
-                    Output:["_col0"]
+                  Select Operator [SEL_2] (rows=20 width=88)
                     TableScan [TS_0] (rows=20 width=13)
                       default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE
               <-Map 4 [SIMPLE_EDGE]
                 SHUFFLE [RS_7]
-                  PartitionCols:_col0
-                  Select Operator [SEL_5] (rows=20 width=184)
-                    Output:["_col0"]
+                  Select Operator [SEL_5] (rows=20 width=88)
                     TableScan [TS_3] (rows=20 width=13)
                       default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE
 
@@ -2128,7 +2124,7 @@ Stage-0
             SHUFFLE [RS_18]
               PartitionCols:_col1
               Merge Join Operator [MERGEJOIN_31] (rows=13 width=8)
-                Conds:RS_15._col0, 1=RS_16._col0, 1(Left Semi),Output:["_col1","_col2"]
+                Conds:RS_15._col0, 1=RS_16._col0, _col1(Left Semi),Output:["_col1","_col2"]
               <-Map 1 [SIMPLE_EDGE]
                 SHUFFLE [RS_15]
                   PartitionCols:_col0, 1
@@ -2140,11 +2136,11 @@ Stage-0
                         default@lineitem,lineitem,Tbl:COMPLETE,Col:COMPLETE,Output:["l_orderkey","l_partkey","l_suppkey","l_linenumber"]
               <-Map 4 [SIMPLE_EDGE]
                 SHUFFLE [RS_16]
-                  PartitionCols:_col0, 1
+                  PartitionCols:_col0, _col1
                   Group By Operator [GBY_14] (rows=4 width=8)
-                    Output:["_col0","_col1"],keys:_col0, 1
-                    Select Operator [SEL_5] (rows=14 width=4)
-                      Output:["_col0"]
+                    Output:["_col0","_col1"],keys:_col0, _col1
+                    Select Operator [SEL_5] (rows=14 width=8)
+                      Output:["_col0","_col1"]
                       Filter Operator [FIL_29] (rows=14 width=96)
                         predicate:((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null)
                         TableScan [TS_3] (rows=100 width=96)
@@ -2347,21 +2343,21 @@ Stage-0
               Output:["_col0","_col1"]
               Filter Operator [FIL_21] (rows=1 width=265)
                 predicate:_col3 is null
-                Merge Join Operator [MERGEJOIN_30] (rows=404 width=265)
+                Merge Join Operator [MERGEJOIN_29] (rows=404 width=265)
                   Conds:RS_18._col0=RS_19._col0(Left Outer),Output:["_col0","_col1","_col3"]
                 <-Map 7 [SIMPLE_EDGE]
                   SHUFFLE [RS_19]
                     PartitionCols:_col0
-                    Select Operator [SEL_13] (rows=166 width=87)
+                    Select Operator [SEL_14] (rows=166 width=87)
                       Output:["_col0"]
-                      Filter Operator [FIL_28] (rows=166 width=87)
+                      Filter Operator [FIL_27] (rows=166 width=87)
                         predicate:(key > '2')
-                        TableScan [TS_11] (rows=500 width=87)
+                        TableScan [TS_12] (rows=500 width=87)
                           default@src_cbo,src_cbo,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
                 <-Reducer 2 [SIMPLE_EDGE]
                   SHUFFLE [RS_18]
                     PartitionCols:_col0
-                    Merge Join Operator [MERGEJOIN_29] (rows=500 width=178)
+                    Merge Join Operator [MERGEJOIN_28] (rows=500 width=178)
                       Conds:(Inner),Output:["_col0","_col1"]
                     <-Map 1 [SIMPLE_EDGE]
                       SHUFFLE [RS_15]
@@ -2371,20 +2367,20 @@ Stage-0
                             default@src_cbo,src_cbo,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
                     <-Reducer 6 [SIMPLE_EDGE]
                       SHUFFLE [RS_16]
-                        Select Operator [SEL_10] (rows=1 width=8)
-                          Filter Operator [FIL_9] (rows=1 width=8)
+                        Select Operator [SEL_11] (rows=1 width=8)
+                          Filter Operator [FIL_10] (rows=1 width=8)
                             predicate:(_col0 = 0)
-                            Group By Operator [GBY_7] (rows=1 width=8)
+                            Group By Operator [GBY_8] (rows=1 width=8)
                               Output:["_col0"],aggregations:["count(VALUE._col0)"]
                             <-Map 5 [SIMPLE_EDGE]
-                              SHUFFLE [RS_6]
-                                Group By Operator [GBY_5] (rows=1 width=8)
+                              SHUFFLE [RS_7]
+                                Group By Operator [GBY_6] (rows=1 width=8)
                                   Output:["_col0"],aggregations:["count()"]
-                                  Select Operator [SEL_4] (rows=1 width=87)
-                                    Filter Operator [FIL_27] (rows=1 width=87)
-                                      predicate:((key > '2') and key is null)
-                                      TableScan [TS_2] (rows=500 width=87)
-                                        default@src_cbo,src_cbo,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
+                                  Filter Operator [FIL_4] (rows=1 width=4)
+                                    predicate:false
+                                    Select Operator [SEL_3] (rows=500 width=4)
+                                      TableScan [TS_2] (rows=500 width=10)
+                                        default@src_cbo,src_cbo,Tbl:COMPLETE,Col:COMPLETE
 
 PREHOOK: query: explain select p_mfgr, b.p_name, p_size 
 from part b 
@@ -2414,35 +2410,35 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 3
-      File Output Operator [FS_23]
-        Select Operator [SEL_22] (rows=1 width=223)
+      File Output Operator [FS_22]
+        Select Operator [SEL_21] (rows=1 width=223)
           Output:["_col0","_col1","_col2"]
-          Filter Operator [FIL_21] (rows=1 width=344)
+          Filter Operator [FIL_20] (rows=1 width=344)
             predicate:_col4 is null
-            Merge Join Operator [MERGEJOIN_28] (rows=1 width=344)
-              Conds:RS_18._col0, _col1=RS_19._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4"]
+            Merge Join Operator [MERGEJOIN_27] (rows=1 width=344)
+              Conds:RS_17._col0, _col1=RS_18._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4"]
             <-Map 6 [SIMPLE_EDGE]
-              SHUFFLE [RS_19]
+              SHUFFLE [RS_18]
                 PartitionCols:_col0, _col1
                 Select Operator [SEL_13] (rows=8 width=219)
                   Output:["_col0","_col1"]
-                  Filter Operator [FIL_26] (rows=8 width=223)
+                  Filter Operator [FIL_25] (rows=8 width=223)
                     predicate:(p_size < 10)
                     TableScan [TS_11] (rows=26 width=223)
                       default@part,b,Tbl:COMPLETE,Col:COMPLETE,Output:["p_name","p_mfgr","p_size"]
             <-Reducer 2 [SIMPLE_EDGE]
-              SHUFFLE [RS_18]
+              SHUFFLE [RS_17]
                 PartitionCols:_col0, _col1
-                Merge Join Operator [MERGEJOIN_27] (rows=26 width=223)
+                Merge Join Operator [MERGEJOIN_26] (rows=26 width=223)
                   Conds:(Inner),Output:["_col0","_col1","_col2"]
                 <-Map 1 [SIMPLE_EDGE]
-                  SHUFFLE [RS_15]
+                  SHUFFLE [RS_14]
                     Select Operator [SEL_1] (rows=26 width=223)
                       Output:["_col0","_col1","_col2"]
                       TableScan [TS_0] (rows=26 width=223)
                         default@part,b,Tbl:COMPLETE,Col:COMPLETE,Output:["p_name","p_mfgr","p_size"]
                 <-Reducer 5 [SIMPLE_EDGE]
-                  SHUFFLE [RS_16]
+                  SHUFFLE [RS_15]
                     Select Operator [SEL_10] (rows=1 width=8)
                       Filter Operator [FIL_9] (rows=1 width=8)
                         predicate:(_col0 = 0)
@@ -2453,7 +2449,7 @@ Stage-0
                             Group By Operator [GBY_5] (rows=1 width=8)
                               Output:["_col0"],aggregations:["count()"]
                               Select Operator [SEL_4] (rows=1 width=223)
-                                Filter Operator [FIL_25] (rows=1 width=223)
+                                Filter Operator [FIL_24] (rows=1 width=223)
                                   predicate:((p_size < 10) and (p_name is null or p_mfgr is null))
                                   TableScan [TS_2] (rows=26 width=223)
                                     default@part,b,Tbl:COMPLETE,Col:COMPLETE,Output:["p_name","p_mfgr","p_size"]
@@ -2488,30 +2484,30 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 4
-      File Output Operator [FS_36]
-        Select Operator [SEL_35] (rows=1 width=125)
+      File Output Operator [FS_35]
+        Select Operator [SEL_34] (rows=1 width=125)
           Output:["_col0","_col1"]
         <-Reducer 3 [SIMPLE_EDGE]
-          SHUFFLE [RS_34]
-            Select Operator [SEL_33] (rows=1 width=125)
+          SHUFFLE [RS_33]
+            Select Operator [SEL_32] (rows=1 width=125)
               Output:["_col0","_col1"]
-              Filter Operator [FIL_32] (rows=1 width=133)
+              Filter Operator [FIL_31] (rows=1 width=133)
                 predicate:_col3 is null
-                Merge Join Operator [MERGEJOIN_42] (rows=1 width=133)
-                  Conds:RS_29.UDFToDouble(_col1)=RS_30._col0(Left Outer),Output:["_col0","_col1","_col3"]
+                Merge Join Operator [MERGEJOIN_41] (rows=1 width=133)
+                  Conds:RS_28.UDFToDouble(_col1)=RS_29._col0(Left Outer),Output:["_col0","_col1","_col3"]
                 <-Reducer 2 [SIMPLE_EDGE]
-                  SHUFFLE [RS_29]
+                  SHUFFLE [RS_28]
                     PartitionCols:UDFToDouble(_col1)
-                    Merge Join Operator [MERGEJOIN_41] (rows=26 width=125)
+                    Merge Join Operator [MERGEJOIN_40] (rows=26 width=125)
                       Conds:(Inner),Output:["_col0","_col1"]
                     <-Map 1 [SIMPLE_EDGE]
-                      SHUFFLE [RS_26]
+                      SHUFFLE [RS_25]
                         Select Operator [SEL_1] (rows=26 width=125)
                           Output:["_col0","_col1"]
                           TableScan [TS_0] (rows=26 width=125)
                             default@part,part,Tbl:COMPLETE,Col:COMPLETE,Output:["p_name","p_size"]
                     <-Reducer 6 [SIMPLE_EDGE]
-                      SHUFFLE [RS_27]
+                      SHUFFLE [RS_26]
                         Select Operator [SEL_17] (rows=1 width=8)
                           Filter Operator [FIL_16] (rows=1 width=8)
                             predicate:(_col0 = 0)
@@ -2526,12 +2522,12 @@ Stage-0
                                     SHUFFLE [RS_6]
                                       Group By Operator [GBY_5] (rows=1 width=0)
                                         Output:["_col0"],aggregations:["avg(p_size)"]
-                                        Filter Operator [FIL_38] (rows=8 width=4)
+                                        Filter Operator [FIL_37] (rows=8 width=4)
                                           predicate:(p_size < 10)
                                           TableScan [TS_2] (rows=26 width=4)
                                             default@part,part,Tbl:COMPLETE,Col:COMPLETE,Output:["p_size"]
                 <-Reducer 8 [SIMPLE_EDGE]
-                  SHUFFLE [RS_30]
+                  SHUFFLE [RS_29]
                     PartitionCols:_col0
                     Group By Operator [GBY_23] (rows=1 width=8)
                       Output:["_col0"],aggregations:["avg(VALUE._col0)"]
@@ -2539,7 +2535,7 @@ Stage-0
                       SHUFFLE [RS_22]
                         Group By Operator [GBY_21] (rows=1 width=0)
                           Output:["_col0"],aggregations:["avg(p_size)"]
-                          Filter Operator [FIL_40] (rows=8 width=4)
+                          Filter Operator [FIL_39] (rows=8 width=4)
                             predicate:(p_size < 10)
                             TableScan [TS_18] (rows=26 width=4)
                               default@part,part,Tbl:COMPLETE,Col:COMPLETE,Output:["p_size"]
@@ -2580,23 +2576,23 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 5
-      File Output Operator [FS_38]
-        Select Operator [SEL_37] (rows=1 width=106)
+      File Output Operator [FS_37]
+        Select Operator [SEL_36] (rows=1 width=106)
           Output:["_col0","_col1"]
         <-Reducer 4 [SIMPLE_EDGE]
-          SHUFFLE [RS_36]
-            Select Operator [SEL_35] (rows=1 width=106)
+          SHUFFLE [RS_35]
+            Select Operator [SEL_34] (rows=1 width=106)
               Output:["_col0","_col1"]
-              Filter Operator [FIL_34] (rows=1 width=204)
+              Filter Operator [FIL_33] (rows=1 width=204)
                 predicate:_col3 is null
-                Merge Join Operator [MERGEJOIN_43] (rows=1 width=204)
-                  Conds:RS_31._col0, _col1=RS_32._col0, _col1(Left Outer),Output:["_col0","_col1","_col3"]
+                Merge Join Operator [MERGEJOIN_42] (rows=1 width=204)
+                  Conds:RS_30._col0, _col1=RS_31._col0, _col1(Left Outer),Output:["_col0","_col1","_col3"]
                 <-Reducer 10 [SIMPLE_EDGE]
-                  SHUFFLE [RS_32]
+                  SHUFFLE [RS_31]
                     PartitionCols:_col0, _col1
                     Select Operator [SEL_26] (rows=1 width=106)
                       Output:["_col0","_col1"]
-                      Filter Operator [FIL_40] (rows=1 width=114)
+                      Filter Operator [FIL_39] (rows=1 width=114)
                         predicate:((_col2 - _col1) > 600.0)
                         Group By Operator [GBY_24] (rows=5 width=114)
                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)"],keys:KEY._col0
@@ -2605,17 +2601,15 @@ Stage-0
                             PartitionCols:_col0
                             Group By Operator [GBY_22] (rows=5 width=114)
                               Output:["_col0","_col1","_col2"],aggregations:["min(p_retailprice)","max(p_retailprice)"],keys:p_mfgr
-                              Select Operator [SEL_21] (rows=26 width=106)
-                                Output:["p_mfgr","p_retailprice"]
-                                TableScan [TS_20] (rows=26 width=106)
-                                  default@part,b,Tbl:COMPLETE,Col:COMPLETE,Output:["p_mfgr","p_retailprice"]
+                              TableScan [TS_20] (rows=26 width=106)
+                                default@part,b,Tbl:COMPLETE,Col:COMPLETE,Output:["p_mfgr","p_retailprice"]
                 <-Reducer 3 [SIMPLE_EDGE]
-                  SHUFFLE [RS_31]
+                  SHUFFLE [RS_30]
                     PartitionCols:_col0, _col1
-                    Merge Join Operator [MERGEJOIN_42] (rows=5 width=106)
+                    Merge Join Operator [MERGEJOIN_41] (rows=5 width=106)
                       Conds:(Inner),Output:["_col0","_col1"]
                     <-Reducer 2 [SIMPLE_EDGE]
-                      SHUFFLE [RS_28]
+                      SHUFFLE [RS_27]
                         Group By Operator [GBY_4] (rows=5 width=106)
                           Output:["_col0","_col1"],aggregations:["min(VALUE._col0)"],keys:KEY._col0
                         <-Map 1 [SIMPLE_EDGE]
@@ -2628,7 +2622,7 @@ Stage-0
                                 TableScan [TS_0] (rows=26 width=106)
                                   default@part,b,Tbl:COMPLETE,Col:COMPLETE,Output:["p_mfgr","p_retailprice"]
                     <-Reducer 8 [SIMPLE_EDGE]
-                      SHUFFLE [RS_29]
+                      SHUFFLE [RS_28]
                         Select Operator [SEL_19] (rows=1 width=8)
                           Filter Operator [FIL_18] (rows=1 width=8)
                             predicate:(_col0 = 0)
@@ -3296,17 +3290,17 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 2
-      File Output Operator [FS_9]
-        Merge Join Operator [MERGEJOIN_10] (rows=250000 width=87)
+      File Output Operator [FS_8]
+        Merge Join Operator [MERGEJOIN_9] (rows=250000 width=87)
           Conds:(Inner),Output:["_col0"]
         <-Map 1 [SIMPLE_EDGE]
-          SHUFFLE [RS_5]
+          SHUFFLE [RS_4]
             Select Operator [SEL_1] (rows=500 width=87)
               Output:["_col0"]
               TableScan [TS_0] (rows=500 width=87)
                 default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key"]
         <-Map 3 [SIMPLE_EDGE]
-          SHUFFLE [RS_6]
+          SHUFFLE [RS_5]
             Select Operator [SEL_3] (rows=500 width=4)
               TableScan [TS_2] (rows=500 width=10)
                 default@src,src,Tbl:COMPLETE,Col:COMPLETE
@@ -5870,10 +5864,10 @@ Stage-0
                 Select Operator [SEL_7] (rows=1 width=33)
                   Output:["_col0","_col1","_col2"]
                   Map Join Operator [MAPJOIN_17] (rows=1 width=33)
-                    Conds:SEL_1.UDFToDouble(_col0)=RS_5.(UDFToDouble(_col0) + UDFToDouble(1))(Left Outer),HybridGraceHashJoin:true,Output:["_col0","_col1","_col2"]
+                    Conds:SEL_1.UDFToDouble(_col0)=RS_5.(UDFToDouble(_col0) + 1.0)(Left Outer),HybridGraceHashJoin:true,Output:["_col0","_col1","_col2"]
                   <-Map 4 [BROADCAST_EDGE]
                     BROADCAST [RS_5]
-                      PartitionCols:(UDFToDouble(_col0) + UDFToDouble(1))
+                      PartitionCols:(UDFToDouble(_col0) + 1.0)
                       Select Operator [SEL_3] (rows=1 width=30)
                         Output:["_col0"]
                         TableScan [TS_2] (rows=1 width=30)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/explainuser_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_4.q.out b/ql/src/test/results/clientpositive/tez/explainuser_4.q.out
index 661f95f..0b07a29 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_4.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_4.q.out
@@ -49,7 +49,7 @@ Stage-0
                 Select Operator [SEL_5] (rows=6144 width=215)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
                   Filter Operator [FIL_16] (rows=6144 width=215)
-                    predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
+                    predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
                     TableScan [TS_3] (rows=12288 width=215)
                       default@alltypesorc,a,Tbl:COMPLETE,Col:NONE,Output:["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"]
 
@@ -130,7 +130,7 @@ Stage-0
                   Select Operator [SEL_5] (rows=6144 width=215)
                     Output:["_col0"]
                     Filter Operator [FIL_18] (rows=6144 width=215)
-                      predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
+                      predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
                       TableScan [TS_3] (rows=12288 width=215)
                         default@alltypesorc,a,Tbl:COMPLETE,Col:NONE,Output:["cint","cbigint"]
 
@@ -210,7 +210,7 @@ Stage-0
                       Select Operator [SEL_5] (rows=6144 width=215)
                         Output:["_col0"]
                         Filter Operator [FIL_20] (rows=6144 width=215)
-                          predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
+                          predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
                           TableScan [TS_3] (rows=12288 width=215)
                             default@alltypesorc,a,Tbl:COMPLETE,Col:NONE,Output:["cint","cbigint"]
 
@@ -281,7 +281,7 @@ Stage-0
                 Select Operator [SEL_5] (rows=6144 width=215)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
                   Filter Operator [FIL_16] (rows=6144 width=215)
-                    predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
+                    predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
                     TableScan [TS_3] (rows=12288 width=215)
                       default@alltypesorc,a,Tbl:COMPLETE,Col:NONE,Output:["ctinyint","csmallint","cint","cbigint","cfloat","cdouble","cstring1","cstring2","ctimestamp1","ctimestamp2","cboolean1","cboolean2"]
             <-Map 1 [CUSTOM_SIMPLE_EDGE]
@@ -362,7 +362,7 @@ Stage-0
                   Select Operator [SEL_5] (rows=6144 width=215)
                     Output:["_col0"]
                     Filter Operator [FIL_18] (rows=6144 width=215)
-                      predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
+                      predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
                       TableScan [TS_3] (rows=12288 width=215)
                         default@alltypesorc,a,Tbl:COMPLETE,Col:NONE,Output:["cint","cbigint"]
               <-Map 1 [CUSTOM_SIMPLE_EDGE]
@@ -442,7 +442,7 @@ Stage-0
                       Select Operator [SEL_5] (rows=6144 width=215)
                         Output:["_col0"]
                         Filter Operator [FIL_20] (rows=6144 width=215)
-                          predicate:(cint BETWEEN 1000000 AND 3000000 and cbigint is not null)
+                          predicate:(cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000)
                           TableScan [TS_3] (rows=12288 width=215)
                             default@alltypesorc,a,Tbl:COMPLETE,Col:NONE,Output:["cint","cbigint"]
                   <-Map 1 [CUSTOM_SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
index 7c22d9a..25bd48f 100644
--- a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
@@ -1250,7 +1250,7 @@ POSTHOOK: Lineage: decimal_mapjoin.cdecimal1 EXPRESSION [(alltypesorc)alltypesor
 POSTHOOK: Lineage: decimal_mapjoin.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_mapjoin.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: decimal_mapjoin.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1331,7 +1331,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1446,7 +1446,7 @@ POSTHOOK: Input: default@decimal_mapjoin
 6981	6981	-515.6210729730	NULL
 6981	6981	-515.6210729730	NULL
 6981	6981	-515.6210729730	NULL
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint
@@ -1527,7 +1527,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[13][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l
   JOIN decimal_mapjoin r ON l.cint = r.cint

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/mergejoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mergejoin.q.out b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
index 299c4db..1f70c46 100644
--- a/ql/src/test/results/clientpositive/tez/mergejoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mergejoin.q.out
@@ -2677,6 +2677,7 @@ POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 NULL	NULL	NULL	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08
+Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 full outer join
@@ -2684,6 +2685,7 @@ full outer join
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tab
 PREHOOK: Input: default@tab_part
+PREHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
 POSTHOOK: query: select * from
 (select * from tab where tab.key = 0)a
@@ -2692,7 +2694,9 @@ full outer join
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@tab
 POSTHOOK: Input: default@tab_part
+POSTHOOK: Input: default@tab_part@ds=2008-04-08
 #### A masked pattern was here ####
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 full outer join
@@ -2715,7 +2719,7 @@ NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
 NULL	NULL	NULL	98	val_98	2008-04-08	98	val_98	2008-04-08
-Warning: Shuffle Join MERGEJOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select * from
 (select * from tab where tab.key = 0)a
 join

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out b/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out
index 2a35c84..92fdbe1 100644
--- a/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out
+++ b/ql/src/test/results/clientpositive/tez/partition_column_names_with_leading_and_trailing_spaces.q.out
@@ -51,7 +51,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@foo_p
 POSTHOOK: Input: default@foo_p@p=a 
 #### A masked pattern was here ####
-1	a 
+1	a
 PREHOOK: query: select * from foo_p where p="a"
 PREHOOK: type: QUERY
 PREHOOK: Input: default@foo_p

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/skewjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/skewjoin.q.out b/ql/src/test/results/clientpositive/tez/skewjoin.q.out
index 0ee28fb..cba0aac 100644
--- a/ql/src/test/results/clientpositive/tez/skewjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/skewjoin.q.out
@@ -735,9 +735,9 @@ STAGE PLANS:
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+                        key expressions: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
                         sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+                        Map-reduce partition columns: _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
@@ -747,7 +747,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 _col0 (type: string), UDFToDouble(substring(_col1, 5)) (type: double)
-                  1 _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + UDFToDouble(1)) (type: double)
+                  1 _col0 (type: string), (UDFToDouble(substring(_col1, 5)) + 1.0) (type: double)
                 outputColumnNames: _col2, _col3
                 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                 Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_1.q.out b/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_1.q.out
index eb40bd7..a88c33a 100644
--- a/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_dynpart_hashjoin_1.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -178,7 +178,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -301,7 +301,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -447,7 +447,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cbigint (type: bigint), cfloat (type: float), cdouble (type: double), cstring1 (type: string), cstring2 (type: string), ctimestamp1 (type: timestamp), ctimestamp2 (type: timestamp), cboolean1 (type: boolean), cboolean2 (type: boolean)
@@ -575,7 +575,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)
@@ -701,7 +701,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (cint BETWEEN 1000000 AND 3000000 and cbigint is not null) (type: boolean)
+                    predicate: (cint is not null and cbigint is not null and cint BETWEEN 1000000 AND 3000000) (type: boolean)
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int)


[33/66] [abbrv] hive git commit: HIVE-13797: Provide a connection string example in beeline (Vihang Karajgaonkar, reviewed by Lefty Leverenz and Sergio Pena)

Posted by sp...@apache.org.
HIVE-13797: Provide a connection string example in beeline (Vihang Karajgaonkar, reviewed by Lefty Leverenz and Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3a2a3e1f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3a2a3e1f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3a2a3e1f

Branch: refs/heads/java8
Commit: 3a2a3e1f940e68acdf4c8d38d0813e6515a67059
Parents: dee4552
Author: Vihang Karajgaonkar <vi...@cloudera.com>
Authored: Wed May 25 16:28:22 2016 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Wed May 25 16:28:22 2016 -0500

----------------------------------------------------------------------
 beeline/src/main/resources/BeeLine.properties | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3a2a3e1f/beeline/src/main/resources/BeeLine.properties
----------------------------------------------------------------------
diff --git a/beeline/src/main/resources/BeeLine.properties b/beeline/src/main/resources/BeeLine.properties
index 16f23a8..e940a7d 100644
--- a/beeline/src/main/resources/BeeLine.properties
+++ b/beeline/src/main/resources/BeeLine.properties
@@ -188,6 +188,16 @@ cmd-usage: Usage: java org.apache.hive.cli.beeline.BeeLine \n \
 \  --addlocaldrivername=DRIVERNAME Add driver name that needs to be supported in the beeline client side\n \
 \  --showConnectedUrl=[true/false] Display the HiveServer2 URI to which this beeline client connected.\n \
 \                                  Only works for HiveServer2 cluster mode.\n \
-\  --help                          display this message
-
-
+\  --help                          display this message\n \
+\n \
+\  Examples:\n \
+\   1. Connect using simple authentication to HiveServer2 on localhost:10000\n \
+\   $ beeline -u jdbc:hive2://localhost:10000 username password\n\n \
+\   2. Connect using simple authentication to HiveServer2 on hs2.local:10012 using -n for username and -p for password\n \
+\   $ beeline -n username -p password -u jdbc:hive2://hs2.local:10012\n\n \
+\   3. Connect using Kerberos authentication with hive/localhost@mydomain.com as HiveServer2 principal\n \
+\   $ beeline -u "jdbc:hive2://hs2.local:10013/default;principal=hive/localhost@mydomain.com\n\n \
+\   4. Connect using SSL connection to HiveServer2 on localhost at 10000\n \
+\   $ beeline -u "jdbc:hive2://localhost:10000/default;ssl=true;sslTrustStore=/usr/local/truststore;trustStorePassword=mytruststorepassword"\n\n \
+\   5. Connect using LDAP authentication\n \
+\   $ beeline -u jdbc:hive2://hs2.local:10013/default <ldap-username> <ldap-password>\n \
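
The connection strings added above map directly onto the Hive JDBC driver, so the same endpoints can also be exercised programmatically. A minimal sketch, assuming a HiveServer2 instance on localhost:10000 with simple authentication and hive-jdbc (plus its dependencies) on the classpath; the host, database, and credentials are placeholders:

  import java.sql.Connection;
  import java.sql.DriverManager;
  import java.sql.ResultSet;
  import java.sql.Statement;

  public class HiveJdbcExample {
    public static void main(String[] args) throws Exception {
      // Older clients need the driver class loaded explicitly.
      Class.forName("org.apache.hive.jdbc.HiveDriver");
      // Same URL shape as beeline example 1 above; credentials are placeholders.
      try (Connection conn = DriverManager.getConnection(
               "jdbc:hive2://localhost:10000/default", "username", "password");
           Statement stmt = conn.createStatement();
           ResultSet rs = stmt.executeQuery("SHOW TABLES")) {
        while (rs.next()) {
          System.out.println(rs.getString(1));
        }
      }
    }
  }

For the Kerberos and SSL variants, the extra parameters (principal=, ssl=, sslTrustStore=, trustStorePassword=) are appended to the JDBC URL exactly as in the beeline examples above.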


[17/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)
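
For context on what this patch changes: constant propagation folds expressions over literals into their values at plan-compilation time; the golden-file diffs in this patch show, for instance, UDFToDouble(1) being rewritten to the literal 1.0 and constant group-by keys such as 'day' being dropped from shuffle keys. Because the Calcite rules touched here (HiveReduceExpressionsRule and the various PullUpConstantsRule classes) already perform the equivalent rewrites during CBO, the follow-up Hive-side ConstantPropagate pass is skipped once CBO has optimized the plan. A toy sketch of the folding idea over a hypothetical expression tree (not Hive's actual ExprNodeDesc API):

  // Hypothetical expression tree, for illustration only.
  abstract class Expr {
    abstract Expr fold();
  }

  class Literal extends Expr {
    final double value;
    Literal(double value) { this.value = value; }
    Expr fold() { return this; }   // literals are already folded
  }

  class ColumnRef extends Expr {
    final String name;
    ColumnRef(String name) { this.name = name; }
    Expr fold() { return this; }   // column values are unknown at compile time
  }

  class Add extends Expr {
    final Expr left, right;
    Add(Expr left, Expr right) { this.left = left; this.right = right; }
    Expr fold() {
      Expr l = left.fold();
      Expr r = right.fold();
      // If both children fold to literals, the node collapses to a single
      // literal; a column reference anywhere underneath blocks the rewrite.
      if (l instanceof Literal && r instanceof Literal) {
        return new Literal(((Literal) l).value + ((Literal) r).value);
      }
      return new Add(l, r);
    }
  }

  class FoldDemo {
    public static void main(String[] args) {
      // key + (1 + 2) folds to key + 3.0.
      Expr folded = new Add(new ColumnRef("key"),
                            new Add(new Literal(1), new Literal(2))).fold();
    }
  }

Running such a pass twice is mostly redundant work; doing it only once, inside CBO, also keeps the operator shapes the Calcite rules deliberately produced, which is what the plan diffs in this patch reflect.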


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/10423f51
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/10423f51
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/10423f51

Branch: refs/heads/java8
Commit: 10423f51c86fd3266b8756225d8dfdf3c523e9c3
Parents: ec4b936
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Sat May 14 11:39:38 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Tue May 24 11:31:16 2016 +0100

----------------------------------------------------------------------
 .../results/positive/hbase_ppd_key_range.q.out  |   1 +
 .../test/results/positive/hbase_queries.q.out   |   5 +
 .../ql/optimizer/ConstantPropagateProcCtx.java  |   4 +
 .../ql/optimizer/NonBlockingOpDeDupProc.java    |   5 +-
 .../hadoop/hive/ql/optimizer/Optimizer.java     |  12 +-
 .../optimizer/SortedDynPartitionOptimizer.java  |  34 +-
 .../hive/ql/optimizer/StatsOptimizer.java       |  18 +-
 .../ql/optimizer/calcite/HiveRelOptUtil.java    |  38 +-
 .../hive/ql/optimizer/calcite/HiveRexUtil.java  | 332 +++++++--
 .../rules/HiveAggregatePullUpConstantsRule.java |  47 ++
 .../HiveProjectFilterPullUpConstantsRule.java   | 177 +++++
 .../rules/HiveReduceExpressionsRule.java        | 186 +++--
 .../rules/HiveSortLimitPullUpConstantsRule.java |  20 +-
 .../rules/HiveUnionPullUpConstantsRule.java     |  27 +-
 .../calcite/stats/HiveRelMdPredicates.java      | 532 --------------
 .../calcite/translator/ASTConverter.java        |   5 +
 .../calcite/translator/ExprNodeConverter.java   |  28 +-
 .../calcite/translator/HiveGBOpConvUtil.java    |   3 +-
 .../calcite/translator/HiveOpConverter.java     |   7 +-
 .../translator/HiveOpConverterPostProc.java     |   2 +-
 .../calcite/translator/RexNodeConverter.java    |  15 +-
 .../translator/SqlFunctionConverter.java        |   9 +-
 .../calcite/translator/TypeConverter.java       |  20 +-
 .../ql/optimizer/pcr/PcrExprProcFactory.java    |  16 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |  22 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  22 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |  13 +
 .../hadoop/hive/ql/plan/ExprNodeDescUtils.java  |  74 +-
 ql/src/test/queries/clientpositive/join_view.q  |   4 +-
 .../clientpositive/annotate_stats_filter.q.out  |   8 +-
 .../archive_excludeHadoop20.q.out               |   1 +
 .../results/clientpositive/archive_multi.q.out  |   1 +
 .../authorization_explain.q.java1.7.out         |   4 +-
 .../results/clientpositive/auto_join33.q.out    |   8 +-
 .../results/clientpositive/auto_join8.q.out     |   2 +-
 .../clientpositive/auto_join_filters.q.out      |   4 +-
 .../clientpositive/auto_join_nulls.q.out        |   4 +-
 .../clientpositive/auto_sortmerge_join_12.q.out |   4 +-
 .../results/clientpositive/bucket_groupby.q.out |  64 +-
 .../bucketizedhiveinputformat.q.out             |   2 +-
 ql/src/test/results/clientpositive/cast1.q.out  |   4 +-
 .../test/results/clientpositive/cbo_const.q.out |   5 +-
 .../cbo_rp_annotate_stats_groupby.q.out         |  70 +-
 .../results/clientpositive/cbo_rp_join1.q.out   |   2 +-
 .../clientpositive/cbo_rp_lineage2.q.out        |   2 +-
 .../clientpositive/colstats_all_nulls.q.out     |   6 +-
 .../constantPropagateForSubQuery.q.out          |   4 +-
 .../clientpositive/constant_prop_3.q.out        |   2 +-
 .../results/clientpositive/constprog2.q.out     |  20 +-
 .../results/clientpositive/constprog3.q.out     |  16 +-
 .../clientpositive/constprog_partitioner.q.out  |  14 +-
 .../clientpositive/constprog_semijoin.q.out     |  46 +-
 ql/src/test/results/clientpositive/cp_sel.q.out |  16 +-
 .../clientpositive/create_genericudf.q.out      |   6 +-
 .../results/clientpositive/create_view.q.out    |   2 +-
 .../results/clientpositive/cross_join.q.out     |   8 +-
 .../clientpositive/cross_join_merge.q.out       |   6 +-
 .../clientpositive/cross_product_check_1.q.out  |  12 +-
 .../clientpositive/cross_product_check_2.q.out  |  16 +-
 ql/src/test/results/clientpositive/cte_5.q.out  |   2 +-
 .../test/results/clientpositive/cte_mat_1.q.out |  13 +-
 .../test/results/clientpositive/cte_mat_2.q.out |  13 +-
 .../results/clientpositive/decimal_stats.q.out  |   2 +-
 .../clientpositive/dynamic_rdd_cache.q.out      |  78 +--
 .../dynpart_sort_optimization.q.out             | 104 +--
 .../dynpart_sort_optimization2.q.out            |  44 +-
 .../clientpositive/explain_logical.q.out        |  48 +-
 .../clientpositive/filter_cond_pushdown.q.out   |   2 +-
 .../test/results/clientpositive/fold_case.q.out |  60 +-
 .../clientpositive/fold_eq_with_case_when.q.out |   4 +-
 .../test/results/clientpositive/fold_when.q.out |   4 +-
 .../clientpositive/folder_predicate.q.out       |  50 +-
 .../results/clientpositive/groupby_ppd.q.out    |  46 +-
 .../clientpositive/groupby_sort_1_23.q.out      |  86 +--
 .../clientpositive/groupby_sort_skew_1_23.q.out | 110 +--
 .../clientpositive/index_auto_unused.q.out      |  38 +-
 .../clientpositive/infer_join_preds.q.out       |  66 +-
 .../test/results/clientpositive/input23.q.out   |   4 +-
 .../test/results/clientpositive/input26.q.out   |  40 +-
 ql/src/test/results/clientpositive/input6.q.out |   2 +-
 ql/src/test/results/clientpositive/input8.q.out |   4 +-
 .../results/clientpositive/input_part10.q.out   |   5 +-
 .../results/clientpositive/insert_into5.q.out   |   5 +-
 .../insert_nonacid_from_acid.q.out              |   2 +-
 ql/src/test/results/clientpositive/join38.q.out |  22 +-
 ql/src/test/results/clientpositive/join42.q.out |   4 +-
 ql/src/test/results/clientpositive/join8.q.out  |   2 +-
 .../clientpositive/join_alt_syntax.q.out        |   4 +-
 .../clientpositive/join_cond_pushdown_1.q.out   |   4 +-
 .../clientpositive/join_cond_pushdown_3.q.out   |   4 +-
 .../join_cond_pushdown_unqual1.q.out            |   4 +-
 .../join_cond_pushdown_unqual3.q.out            |   4 +-
 .../results/clientpositive/join_filters.q.out   |   4 +-
 .../results/clientpositive/join_nulls.q.out     |   4 +-
 .../results/clientpositive/join_reorder.q.out   |   6 +-
 .../test/results/clientpositive/join_view.q.out |  13 +-
 .../test/results/clientpositive/lineage2.q.out  |   2 +-
 .../test/results/clientpositive/lineage3.q.out  |  14 +-
 .../list_bucket_query_oneskew_1.q.out           |   2 +-
 .../list_bucket_query_oneskew_2.q.out           |   8 +-
 .../results/clientpositive/llap/cte_mat_1.q.out |  13 +-
 .../results/clientpositive/llap/cte_mat_2.q.out |  13 +-
 .../llap/dynamic_partition_pruning.q.out        | 231 +++----
 .../llap/dynamic_partition_pruning_2.q.out      |   4 +-
 .../llap/hybridgrace_hashjoin_1.q.out           |   8 +-
 .../llap/tez_dynpart_hashjoin_1.q.out           |  12 +-
 .../clientpositive/llap/tez_self_join.q.out     |  32 +-
 .../llap/tez_union_dynamic_partition.q.out      |  44 +-
 .../llap/tez_vector_dynpart_hashjoin_1.q.out    |  12 +-
 .../vectorized_dynamic_partition_pruning.q.out  | 231 +++----
 .../test/results/clientpositive/masking_2.q.out |   2 +-
 .../test/results/clientpositive/mergejoin.q.out |   6 +-
 .../nonblock_op_deduplicate.q.out               |   8 +-
 .../test/results/clientpositive/orc_llap.q.out  |   8 +-
 .../clientpositive/partition_multilevels.q.out  |  40 +-
 ql/src/test/results/clientpositive/pcr.q.out    | 102 +--
 .../results/clientpositive/perf/query18.q.out   |   6 +-
 .../results/clientpositive/perf/query26.q.out   |   6 +-
 .../results/clientpositive/perf/query27.q.out   |   6 +-
 .../results/clientpositive/perf/query28.q.out   |  34 +-
 .../results/clientpositive/perf/query31.q.out   | 556 ++++++++-------
 .../results/clientpositive/perf/query39.q.out   |  60 +-
 .../results/clientpositive/perf/query42.q.out   |  34 +-
 .../results/clientpositive/perf/query48.q.out   |   6 +-
 .../results/clientpositive/perf/query52.q.out   |  16 +-
 .../results/clientpositive/perf/query64.q.out   |  42 +-
 .../results/clientpositive/perf/query66.q.out   | 304 ++++----
 .../results/clientpositive/perf/query7.q.out    |   6 +-
 .../results/clientpositive/perf/query72.q.out   |   6 +-
 .../results/clientpositive/perf/query75.q.out   | 686 +++++++++----------
 .../results/clientpositive/perf/query88.q.out   | 136 ++--
 .../results/clientpositive/perf/query90.q.out   |  46 +-
 .../results/clientpositive/pointlookup2.q.out   | 100 +--
 .../results/clientpositive/pointlookup3.q.out   | 100 +--
 .../clientpositive/ppd_constant_expr.q.out      |   8 +-
 .../test/results/clientpositive/ppd_join5.q.out |   6 +-
 .../clientpositive/ppd_outer_join4.q.out        |  36 +-
 .../clientpositive/ppd_outer_join5.q.out        |   6 +-
 .../clientpositive/ppd_repeated_alias.q.out     |   2 +-
 .../results/clientpositive/ppd_udf_case.q.out   |  64 +-
 .../results/clientpositive/ppd_union_view.q.out |  48 +-
 .../results/clientpositive/quotedid_basic.q.out | 100 ++-
 .../clientpositive/quotedid_partition.q.out     |  18 +-
 .../clientpositive/rand_partitionpruner3.q.out  |  12 +-
 .../results/clientpositive/recursive_dir.q.out  |   2 +-
 .../test/results/clientpositive/semijoin4.q.out |  68 +-
 .../test/results/clientpositive/semijoin5.q.out |  28 +-
 .../test/results/clientpositive/skewjoin.q.out  |   6 +-
 .../results/clientpositive/smb_mapjoin_25.q.out |  59 +-
 .../clientpositive/spark/auto_join8.q.out       |   2 +-
 .../spark/auto_join_filters.q.out               |   4 +-
 .../clientpositive/spark/auto_join_nulls.q.out  |   4 +-
 .../spark/auto_sortmerge_join_12.q.out          |   4 +-
 .../spark/bucketizedhiveinputformat.q.out       |   2 +-
 .../spark/constprog_semijoin.q.out              |  46 +-
 .../clientpositive/spark/cross_join.q.out       |   8 +-
 .../spark/cross_product_check_1.q.out           |  12 +-
 .../spark/cross_product_check_2.q.out           |  12 +-
 .../spark/dynamic_rdd_cache.q.out               |  78 +--
 .../spark/groupby_sort_1_23.q.out               |  78 +--
 .../spark/groupby_sort_skew_1_23.q.out          |  90 +--
 .../results/clientpositive/spark/join38.q.out   |  22 +-
 .../results/clientpositive/spark/join8.q.out    |   2 +-
 .../clientpositive/spark/join_alt_syntax.q.out  |   4 +-
 .../spark/join_cond_pushdown_1.q.out            |   4 +-
 .../spark/join_cond_pushdown_3.q.out            |   4 +-
 .../spark/join_cond_pushdown_unqual1.q.out      |   4 +-
 .../spark/join_cond_pushdown_unqual3.q.out      |   4 +-
 .../clientpositive/spark/join_reorder.q.out     |   6 +-
 .../clientpositive/spark/join_view.q.out        |  15 +-
 .../test/results/clientpositive/spark/pcr.q.out |  46 +-
 .../clientpositive/spark/ppd_join5.q.out        |   6 +-
 .../clientpositive/spark/ppd_outer_join4.q.out  |  36 +-
 .../clientpositive/spark/ppd_outer_join5.q.out  |   6 +-
 .../results/clientpositive/spark/skewjoin.q.out |   6 +-
 .../clientpositive/spark/smb_mapjoin_25.q.out   |  37 +-
 .../spark/table_access_keys_stats.q.out         |  17 +-
 .../clientpositive/spark/union_view.q.out       |  20 +-
 .../spark/vector_mapjoin_reduce.q.out           |  10 +-
 .../spark/vector_outer_join1.q.out              |   8 +-
 .../spark/vector_outer_join2.q.out              |   8 +-
 .../spark/vector_outer_join3.q.out              |   8 +-
 .../spark/vector_outer_join4.q.out              |   8 +-
 .../spark/vector_outer_join5.q.out              |   8 +-
 .../spark/vectorization_short_regress.q.out     |   4 +-
 .../results/clientpositive/subquery_notin.q.out |  50 +-
 .../subquery_notin_having.q.java1.7.out         |  38 +-
 .../subquery_unqualcolumnrefs.q.out             |   2 +-
 .../results/clientpositive/subquery_views.q.out |   8 +-
 .../table_access_keys_stats.q.out               |  17 +-
 .../clientpositive/tez/auto_join_filters.q.out  |   4 +-
 .../clientpositive/tez/auto_join_nulls.q.out    |   4 +-
 .../tez/auto_sortmerge_join_12.q.out            |   4 +-
 .../clientpositive/tez/constprog_semijoin.q.out |  54 +-
 .../results/clientpositive/tez/cross_join.q.out |   8 +-
 .../tez/cross_product_check_1.q.out             |  12 +-
 .../tez/cross_product_check_2.q.out             |  12 +-
 .../results/clientpositive/tez/cte_mat_1.q.out  |  11 +-
 .../results/clientpositive/tez/cte_mat_2.q.out  |  11 +-
 .../tez/dynamic_partition_pruning.q.out         | 230 +++----
 .../tez/dynamic_partition_pruning_2.q.out       |   4 +-
 .../tez/dynpart_sort_optimization.q.out         | 104 +--
 .../tez/dynpart_sort_optimization2.q.out        |  44 +-
 .../clientpositive/tez/explainuser_1.q.out      | 148 ++--
 .../clientpositive/tez/explainuser_4.q.out      |  12 +-
 .../tez/hybridgrace_hashjoin_1.q.out            |   8 +-
 .../results/clientpositive/tez/mergejoin.q.out  |   6 +-
 ...names_with_leading_and_trailing_spaces.q.out |   2 +-
 .../results/clientpositive/tez/skewjoin.q.out   |   6 +-
 .../tez/tez_dynpart_hashjoin_1.q.out            |  12 +-
 .../clientpositive/tez/tez_self_join.q.out      |  32 +-
 .../tez/tez_union_dynamic_partition.q.out       |  44 +-
 .../tez/tez_vector_dynpart_hashjoin_1.q.out     |  12 +-
 .../clientpositive/tez/union_fast_stats.q.out   |  16 +-
 .../tez/vector_between_columns.q.out            |   4 +-
 .../tez/vector_binary_join_groupby.q.out        |   2 +-
 .../clientpositive/tez/vector_coalesce.q.out    |  10 +-
 .../clientpositive/tez/vector_date_1.q.out      |   2 +-
 .../clientpositive/tez/vector_decimal_2.q.out   |  52 +-
 .../tez/vector_decimal_round_2.q.out            |   8 +-
 .../tez/vector_groupby_mapjoin.q.out            |  28 +-
 .../clientpositive/tez/vector_interval_1.q.out  |   8 +-
 .../tez/vector_interval_arithmetic.q.out        |   5 +-
 .../tez/vector_join_filters.q.out               |   2 +-
 .../clientpositive/tez/vector_join_nulls.q.out  |   4 +-
 .../tez/vector_mapjoin_reduce.q.out             |  12 +-
 .../tez/vector_null_projection.q.out            |  10 +-
 .../clientpositive/tez/vector_outer_join1.q.out |   8 +-
 .../clientpositive/tez/vector_outer_join2.q.out |   8 +-
 .../clientpositive/tez/vector_outer_join3.q.out |   8 +-
 .../clientpositive/tez/vector_outer_join4.q.out |   8 +-
 .../clientpositive/tez/vector_outer_join5.q.out |  12 +-
 .../tez/vectorization_short_regress.q.out       |   4 +-
 .../vectorized_dynamic_partition_pruning.q.out  | 232 +++----
 .../clientpositive/udf_folder_constants.q.out   |   8 +-
 .../clientpositive/udf_unix_timestamp.q.out     |   2 +-
 .../clientpositive/union_fast_stats.q.out       |  16 +-
 .../results/clientpositive/union_offcbo.q.out   |  40 +-
 .../clientpositive/union_remove_12.q.out        |   2 +-
 .../clientpositive/union_remove_14.q.out        |   2 +-
 .../results/clientpositive/union_view.q.out     |  20 +-
 .../clientpositive/unionall_unbalancedppd.q.out |  36 +-
 .../clientpositive/vector_between_columns.q.out |   4 +-
 .../vector_binary_join_groupby.q.out            |   2 +-
 .../clientpositive/vector_coalesce.q.out        |  10 +-
 .../results/clientpositive/vector_date_1.q.out  |   2 +-
 .../clientpositive/vector_decimal_2.q.out       |  52 +-
 .../clientpositive/vector_decimal_round_2.q.out |   8 +-
 .../clientpositive/vector_groupby_mapjoin.q.out |   4 +-
 .../clientpositive/vector_interval_1.q.out      |   8 +-
 .../vector_interval_arithmetic.q.out            |   5 +-
 .../clientpositive/vector_join_filters.q.out    |   2 +-
 .../clientpositive/vector_join_nulls.q.out      |   4 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |  10 +-
 .../clientpositive/vector_null_projection.q.out |  10 +-
 .../vector_number_compare_projection.q.out      |  10 +-
 .../clientpositive/vector_outer_join1.q.out     |   8 +-
 .../clientpositive/vector_outer_join2.q.out     |   8 +-
 .../clientpositive/vector_outer_join3.q.out     |   8 +-
 .../clientpositive/vector_outer_join4.q.out     |   8 +-
 .../clientpositive/vector_outer_join5.q.out     |   8 +-
 .../vectorization_short_regress.q.out           |   4 +-
 262 files changed, 4230 insertions(+), 4395 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out b/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
index 27446b4..0ef0efd 100644
--- a/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
@@ -438,6 +438,7 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: hbase_pushdown
+            filterExpr: (key >= '90') (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
               predicate: (key >= '90') (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/hbase-handler/src/test/results/positive/hbase_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index a99f561..8aa5f84 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -917,7 +917,12 @@ WITH SERDEPROPERTIES (
   'hbase.columns.mapping'='cf:string', 
   'serialization.format'='1')
 TBLPROPERTIES (
+  'COLUMN_STATS_ACCURATE'='{\"BASIC_STATS\":\"true\"}', 
   'hbase.table.name'='hbase_table_0', 
+  'numFiles'='0', 
+  'numRows'='0', 
+  'rawDataSize'='0', 
+  'totalSize'='0', 
 #### A masked pattern was here ####
 PREHOOK: query: DROP TABLE IF EXISTS hbase_table_9
 PREHOOK: type: DROPTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
index bc52f7b..89de234 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
@@ -216,6 +216,10 @@ public class ConstantPropagateProcCtx implements NodeProcessorCtx {
         Operator<?> parent = op.getParentOperators().get(0);
         if (op.getColumnExprMap() != null && op.getColumnExprMap().entrySet() != null) {
           for (Entry<String, ExprNodeDesc> entry : op.getColumnExprMap().entrySet()) {
+            if (op.getSchema().getPosition(entry.getKey()) == -1) {
+              // Not present
+              continue;
+            }
             ExprNodeDesc expr = entry.getValue();
             if (expr instanceof ExprNodeColumnDesc) {
               String parentColName = ((ExprNodeColumnDesc) expr).getColumn();

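The new guard above skips column-expression-map entries whose output column no longer exists in the operator's schema, which avoids propagating constants through pruned columns. A minimal, self-contained sketch of the same pattern, using plain maps and lists as stand-ins for Hive's RowSchema and column-expression map (SchemaGuardSketch and its names are illustrative, not Hive API):

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class SchemaGuardSketch {
      // Stand-in for RowSchema.getPosition(): returns -1 when the
      // column is absent from the operator's output schema.
      static int position(List<String> schema, String col) {
        return schema.indexOf(col);
      }

      public static void main(String[] args) {
        List<String> schema = Arrays.asList("_col0", "_col1");
        Map<String, String> columnExprMap = new LinkedHashMap<>();
        columnExprMap.put("_col0", "key");
        columnExprMap.put("_col1", "value");
        columnExprMap.put("_col2", "pruned");  // stale entry, not in the schema
        for (Map.Entry<String, String> e : columnExprMap.entrySet()) {
          if (position(schema, e.getKey()) == -1) {
            continue;  // skipped, exactly as the new guard does
          }
          System.out.println(e.getKey() + " <- " + e.getValue());
        }
      }
    }
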
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
index 37dbe32..de4d0e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
@@ -28,7 +28,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
-import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
@@ -109,7 +108,7 @@ public class NonBlockingOpDeDupProc extends Transform {
         if (cSEL.getColumnExprMap() == null) {
           // If the child SelectOperator does not have the ColumnExprMap,
           // we do not need to update the ColumnExprMap in the parent SelectOperator.
-          pSEL.getConf().setColList(ExprNodeDescUtils.backtrack(cSELColList, cSEL, pSEL));
+          pSEL.getConf().setColList(ExprNodeDescUtils.backtrack(cSELColList, cSEL, pSEL, true));
           pSEL.getConf().setOutputColumnNames(cSELOutputColumnNames);
         } else {
           // If the child SelectOperator has the ColumnExprMap,
@@ -121,7 +120,7 @@ public class NonBlockingOpDeDupProc extends Transform {
             String outputColumnName = cSELOutputColumnNames.get(i);
             ExprNodeDesc cSELExprNodeDesc = cSELColList.get(i);
             ExprNodeDesc newPSELExprNodeDesc =
-                ExprNodeDescUtils.backtrack(cSELExprNodeDesc, cSEL, pSEL);
+                ExprNodeDescUtils.backtrack(cSELExprNodeDesc, cSEL, pSEL, true);
             newPSELColList.add(newPSELExprNodeDesc);
             newPSELOutputColumnNames.add(outputColumnName);
             colExprMap.put(outputColumnName, newPSELExprNodeDesc);

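The extra boolean now passed to ExprNodeDescUtils.backtrack presumably enables constant folding while the child SELECT's expressions are rewritten over the parent SELECT. A rough sketch of the backtracking idea with string stand-ins for ExprNodeDesc trees (hypothetical helper, not the real signature):

    import java.util.HashMap;
    import java.util.Map;

    public class BacktrackSketch {
      // Resolve a child SELECT's column reference against the parent
      // SELECT's column->expression map; the real backtrack walks
      // ExprNodeDesc trees instead of strings.
      static String backtrack(String childRef, Map<String, String> parentColExprMap) {
        String resolved = parentColExprMap.get(childRef);
        return resolved != null ? resolved : childRef;
      }

      public static void main(String[] args) {
        Map<String, String> parent = new HashMap<>();
        parent.put("_col0", "key");
        parent.put("_col1", "(1 + 2)");  // a foldable expression
        // Merging child SEL [_col1, _col0] into the parent yields
        // [(1 + 2), key]; with folding enabled, (1 + 2) can collapse to 3.
        System.out.println(backtrack("_col1", parent) + ", " + backtrack("_col0", parent));
      }
    }
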
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
index bf9a0a3..5ee54b9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java
@@ -129,13 +129,11 @@ public class Optimizer {
         /* Add list bucketing pruner. */
         transformations.add(new ListBucketingPruner());
       }
-    }
-    if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD)
-            && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION)) ||
-            (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION)
-                    && pctx.getContext().isCboSucceeded())) {
-      // PartitionPruner may create more folding opportunities, run ConstantPropagate again.
-      transformations.add(new ConstantPropagate());
+      if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION) &&
+              !pctx.getContext().isCboSucceeded()) {
+        // PartitionPruner may create more folding opportunities, run ConstantPropagate again.
+        transformations.add(new ConstantPropagate());
+      }
     }
 
     if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTGROUPBY) ||

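Condensed, the change above makes the post-pruning ConstantPropagate pass conditional on CBO not having already optimized the plan. A small runnable distillation of the new gate (class and method names here are illustrative):

    public class ConstantPropagateGate {
      // The second ConstantPropagate pass now runs only when the
      // optimization is enabled AND CBO did not already fold constants.
      static boolean runSecondPass(boolean constPropEnabled, boolean cboSucceeded) {
        return constPropEnabled && !cboSucceeded;
      }

      public static void main(String[] args) {
        System.out.println(runSecondPass(true, true));   // false: CBO handled it
        System.out.println(runSecondPass(true, false));  // true: fold again after pruning
        System.out.println(runSecondPass(false, false)); // false: disabled
      }
    }
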
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
index 4adf7b2..36b7036 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
 import org.apache.hadoop.hive.ql.plan.ListBucketingCtx;
@@ -287,7 +288,7 @@ public class SortedDynPartitionOptimizer extends Transform {
       }
       if (op.getColumnExprMap() != null) {
         for(String dpCol : dpCols) {
-          ExprNodeDesc end = findConstantExprOrigin(dpCol, op);
+          ExprNodeDesc end = ExprNodeDescUtils.findConstantExprOrigin(dpCol, op);
           if (!(end instanceof ExprNodeConstantDesc)) {
             return false;
           }
@@ -298,37 +299,6 @@ public class SortedDynPartitionOptimizer extends Transform {
       return true;
     }
 
-    // Find the constant origin of a certain column if it is originated from a constant
-    // Otherwise, it returns the expression that originated the column
-    private ExprNodeDesc findConstantExprOrigin(String dpCol, Operator<? extends OperatorDesc> op) {
-      ExprNodeDesc expr = op.getColumnExprMap().get(dpCol);
-      ExprNodeDesc foldedExpr;
-      // If it is a function, we try to fold it
-      if (expr instanceof ExprNodeGenericFuncDesc) {
-        foldedExpr = ConstantPropagateProcFactory.foldExpr((ExprNodeGenericFuncDesc)expr);
-        if (foldedExpr == null) {
-          foldedExpr = expr;
-        }
-      } else {
-        foldedExpr = expr;
-      }
-      // If it is a column reference, we will try to resolve it
-      if (foldedExpr instanceof ExprNodeColumnDesc) {
-        Operator<? extends OperatorDesc> originOp = null;
-        for(Operator<? extends OperatorDesc> parentOp : op.getParentOperators()) {
-          if (parentOp.getColumnExprMap() != null) {
-            originOp = parentOp;
-            break;
-          }
-        }
-        if (originOp != null) {
-          return findConstantExprOrigin(((ExprNodeColumnDesc)foldedExpr).getColumn(), originOp);
-        }
-      }
-      // Otherwise, we return the expression
-      return foldedExpr;
-    }
-
     // Remove RS and SEL introduced by enforce bucketing/sorting config
     // Convert PARENT -> RS -> SEL -> FS to PARENT -> FS
     private boolean removeRSInsertedByEnforceBucketing(FileSinkOperator fsOp) {

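The method deleted above now lives in ExprNodeDescUtils so that StatsOptimizer can reuse it. A toy version of the same recursion, resolving a column through a chain of column-expression maps until a non-column expression is reached (string maps stand in for operators; all names here are illustrative):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ConstantOriginSketch {
      // Walk column references up the operator chain until something
      // that is not a column reference (ideally a constant) is found.
      static String findOrigin(String col, List<Map<String, String>> opsBottomUp, int idx) {
        String expr = opsBottomUp.get(idx).get(col);
        boolean isColumnRef = expr != null && expr.startsWith("_col");
        if (isColumnRef && idx + 1 < opsBottomUp.size()) {
          return findOrigin(expr, opsBottomUp, idx + 1);
        }
        return expr;
      }

      public static void main(String[] args) {
        Map<String, String> fsMap = new HashMap<>();
        fsMap.put("_col2", "_col1");          // partition col points at a parent col
        Map<String, String> selMap = new HashMap<>();
        selMap.put("_col1", "'2016-05-26'");  // parent SEL binds it to a constant
        System.out.println(findOrigin("_col2", Arrays.asList(fsMap, selMap), 0));
      }
    }
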
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 0cfd529..7febfd5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -22,7 +22,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -30,8 +29,6 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.Stack;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
@@ -67,6 +64,7 @@ import org.apache.hadoop.hive.ql.plan.AggregationDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
 import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFCount;
@@ -81,6 +79,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.Pr
 import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 
@@ -213,13 +213,17 @@ public class StatsOptimizer extends Transform {
 
     private boolean hasNullOrConstantGbyKey(GroupByOperator gbyOp) {
       GroupByDesc gbyDesc = gbyOp.getConf();
+      int numCols = gbyDesc.getOutputColumnNames().size();
+      int aggCols = gbyDesc.getAggregators().size();
       // If the Group by operator has null key
-      if (gbyDesc.getOutputColumnNames().size() ==
-        gbyDesc.getAggregators().size()) {
+      if (numCols == aggCols) {
         return true;
       }
-      for (ExprNodeDesc en :gbyDesc.getKeys()) {
-        if (!(en instanceof ExprNodeConstantDesc)) {
+      // If the Gby key is a constant
+      List<String> dpCols = gbyOp.getSchema().getColumnNames().subList(0, numCols - aggCols);
+      for(String dpCol : dpCols) {
+        ExprNodeDesc end = ExprNodeDescUtils.findConstantExprOrigin(dpCol, gbyOp);
+        if (!(end instanceof ExprNodeConstantDesc)) {
           return false;
         }
       }

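The reworked check treats the first (numCols - aggCols) schema columns as the group-by key and accepts the key only if every such column folds to a constant. A compact sketch of that shape, with a boolean list standing in for the per-column constant-origin test:

    import java.util.Arrays;
    import java.util.List;

    public class GbyKeyCheckSketch {
      // Mirrors hasNullOrConstantGbyKey: no key columns means a "null"
      // key; otherwise every key column must fold to a constant.
      static boolean nullOrConstantKey(List<String> outputCols, int aggCols,
                                       List<Boolean> keyIsConstant) {
        int numCols = outputCols.size();
        if (numCols == aggCols) {
          return true;  // no group-by key at all
        }
        for (int i = 0; i < numCols - aggCols; i++) {
          if (!keyIsConstant.get(i)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // e.g. one constant key column plus one aggregate output:
        // answerable from metastore stats alone.
        System.out.println(nullOrConstantKey(Arrays.asList("_col0", "_col1"), 1,
            Arrays.asList(true)));  // true
      }
    }
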
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
index c6d1d46..4c154d0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
@@ -35,12 +35,12 @@ import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.util.ImmutableBitSet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 public class HiveRelOptUtil extends RelOptUtil {
@@ -105,23 +105,6 @@ public class HiveRelOptUtil extends RelOptUtil {
     final RelOptCluster cluster = inputs.get(0).getCluster();
     final RexBuilder rexBuilder = cluster.getRexBuilder();
 
-    final ImmutableBitSet[] inputsRange = new ImmutableBitSet[inputs.size()];
-    int totalFieldCount = 0;
-    for (int i = 0; i < inputs.size(); i++) {
-      final int firstField = totalFieldCount + sysFieldCount;
-      totalFieldCount = firstField + inputs.get(i).getRowType().getFieldCount();
-      inputsRange[i] = ImmutableBitSet.range(firstField, totalFieldCount);
-    }
-
-    // adjustment array
-    int[] adjustments = new int[totalFieldCount];
-    for (int i = 0; i < inputs.size(); i++) {
-      final int adjustment = inputsRange[i].nextSetBit(0);
-      for (int j = adjustment; j < inputsRange[i].length(); j++) {
-        adjustments[j] = -adjustment;
-      }
-    }
-
     if (condition instanceof RexCall) {
       RexCall call = (RexCall) condition;
       if (call.getOperator() == SqlStdOperatorTable.AND) {
@@ -165,6 +148,14 @@ public class HiveRelOptUtil extends RelOptUtil {
         final ImmutableBitSet projRefs0 = InputFinder.bits(op0);
         final ImmutableBitSet projRefs1 = InputFinder.bits(op1);
 
+        final ImmutableBitSet[] inputsRange = new ImmutableBitSet[inputs.size()];
+        int totalFieldCount = 0;
+        for (int i = 0; i < inputs.size(); i++) {
+          final int firstField = totalFieldCount + sysFieldCount;
+          totalFieldCount = firstField + inputs.get(i).getRowType().getFieldCount();
+          inputsRange[i] = ImmutableBitSet.range(firstField, totalFieldCount);
+        }
+
         boolean foundBothInputs = false;
         for (int i = 0; i < inputs.size() && !foundBothInputs; i++) {
           if (projRefs0.intersects(inputsRange[i])
@@ -196,6 +187,15 @@ public class HiveRelOptUtil extends RelOptUtil {
         }
 
         if ((leftKey != null) && (rightKey != null)) {
+          // adjustment array
+          int[] adjustments = new int[totalFieldCount];
+          for (int i = 0; i < inputs.size(); i++) {
+            final int adjustment = inputsRange[i].nextSetBit(0);
+            for (int j = adjustment; j < inputsRange[i].length(); j++) {
+              adjustments[j] = -adjustment;
+            }
+          }
+
           // replace right Key input ref
           rightKey =
               rightKey.accept(

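The hunks above defer building inputsRange and the adjustments array until a join key pair is actually found, so the work is skipped when no key emerges. A worked example of the range computation itself, assuming Calcite's ImmutableBitSet is on the classpath: two inputs with 3 and 2 fields yield global ranges [0,3) and [3,5), and fields of the second input are shifted by -3 to recover local indices.

    import org.apache.calcite.util.ImmutableBitSet;

    public class InputRangeSketch {
      public static void main(String[] args) {
        int[] fieldCounts = {3, 2};  // field counts of the two join inputs
        int sysFieldCount = 0;
        ImmutableBitSet[] inputsRange = new ImmutableBitSet[fieldCounts.length];
        int totalFieldCount = 0;
        for (int i = 0; i < fieldCounts.length; i++) {
          int firstField = totalFieldCount + sysFieldCount;
          totalFieldCount = firstField + fieldCounts[i];
          inputsRange[i] = ImmutableBitSet.range(firstField, totalFieldCount);
        }
        // Prints the global field sets, e.g. {0, 1, 2} and {3, 4};
        // the adjustment for the second input is -3.
        System.out.println(inputsRange[0] + " " + inputsRange[1]);
      }
    }
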
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
index 2f309f3..6933fec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexUtil.java
@@ -19,8 +19,11 @@ package org.apache.hadoop.hive.ql.optimizer.calcite;
 
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.calcite.linq4j.Ord;
@@ -29,12 +32,16 @@ import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexShuttle;
 import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
 import org.apache.calcite.sql.type.SqlTypeName;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Util;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
@@ -42,6 +49,8 @@ import com.google.common.collect.Lists;
 
 public class HiveRexUtil {
 
+  protected static final Logger LOG = LoggerFactory.getLogger(HiveRexUtil.class);
+
   /**
    * Simplifies a boolean expression.
    *
@@ -54,13 +63,20 @@ public class HiveRexUtil {
    * </ul>
    */
   public static RexNode simplify(RexBuilder rexBuilder, RexNode e) {
+    return simplify(rexBuilder, e, false);
+  }
+
+  public static RexNode simplify(RexBuilder rexBuilder, RexNode e,
+          boolean unknownAsFalse) {
     switch (e.getKind()) {
     case AND:
-      return simplifyAnd(rexBuilder, (RexCall) e);
+      return simplifyAnd(rexBuilder, (RexCall) e, unknownAsFalse);
     case OR:
       return simplifyOr(rexBuilder, (RexCall) e);
+    case NOT:
+      return simplifyNot(rexBuilder, (RexCall) e);
     case CASE:
-      return simplifyCase(rexBuilder, (RexCall) e);
+      return simplifyCase(rexBuilder, (RexCall) e, unknownAsFalse);
     case IS_NULL:
       return ((RexCall) e).getOperands().get(0).getType().isNullable()
           ? e : rexBuilder.makeLiteral(false);
@@ -72,32 +88,67 @@ public class HiveRexUtil {
     }
   }
 
-  private static RexNode simplifyCase(RexBuilder rexBuilder, RexCall call) {
+  private static RexNode simplifyNot(RexBuilder rexBuilder, RexCall call) {
+    final RexNode a = call.getOperands().get(0);
+    switch (a.getKind()) {
+    case NOT:
+      // NOT NOT x ==> x
+      return simplify(rexBuilder, ((RexCall) a).getOperands().get(0));
+    }
+    final SqlKind negateKind = a.getKind().negate();
+    if (a.getKind() != negateKind) {
+      return simplify(rexBuilder,
+          rexBuilder.makeCall(op(negateKind),
+              ImmutableList.of(((RexCall) a).getOperands().get(0))));
+    }
+    final SqlKind negateKind2 = negate(a.getKind());
+    if (a.getKind() != negateKind2) {
+      return simplify(rexBuilder,
+          rexBuilder.makeCall(op(negateKind2), ((RexCall) a).getOperands()));
+    }
+    return call;
+  }
+
+  private static RexNode simplifyCase(RexBuilder rexBuilder, RexCall call,
+          boolean unknownAsFalse) {
     final List<RexNode> operands = call.getOperands();
     final List<RexNode> newOperands = new ArrayList<>();
+    final Set<String> values = new HashSet<>();
+    boolean containsNullableCase = false;
     for (int i = 0; i < operands.size(); i++) {
       RexNode operand = operands.get(i);
       if (RexUtil.isCasePredicate(call, i)) {
         if (operand.isAlwaysTrue()) {
           // Predicate is always TRUE. Make value the ELSE and quit.
           newOperands.add(operands.get(i + 1));
+          if (operand.getType().isNullable()) {
+            containsNullableCase = true;
+          }
           break;
-        }
-        if (operand.isAlwaysFalse()) {
-          // Predicate is always FALSE. Skip predicate and value.
+        } else if (operand.isAlwaysFalse() || RexUtil.isNull(operand)) {
+          // Predicate is always FALSE or NULL. Skip predicate and value.
           ++i;
           continue;
         }
+        if (operand.getType().isNullable()) {
+          containsNullableCase = true;
+        }
+      } else {
+        if (unknownAsFalse && RexUtil.isNull(operand)) {
+          values.add(rexBuilder.makeLiteral(false).toString());
+        } else {
+          values.add(operand.toString());
+        }
       }
       newOperands.add(operand);
     }
     assert newOperands.size() % 2 == 1;
-    switch (newOperands.size()) {
-    case 1:
-      return rexBuilder.makeCast(call.getType(), newOperands.get(0));
+    if (newOperands.size() == 1 || values.size() == 1) {
+      return rexBuilder.makeCast(call.getType(), newOperands.get(newOperands.size() - 1));
     }
   trueFalse:
-    if (call.getType().getSqlTypeName() == SqlTypeName.BOOLEAN) {
+    if (call.getType().getSqlTypeName() == SqlTypeName.BOOLEAN &&
+            (!containsNullableCase || unknownAsFalse)) {
       // Optimize CASE where every branch returns constant true or constant
       // false:
       //   CASE
@@ -110,7 +161,8 @@ public class HiveRexUtil {
           casePairs(rexBuilder, newOperands);
       for (Ord<Pair<RexNode, RexNode>> pair : Ord.zip(pairs)) {
         if (!pair.e.getValue().isAlwaysTrue()
-            && !pair.e.getValue().isAlwaysFalse()) {
+            && !pair.e.getValue().isAlwaysFalse()
+            && (!unknownAsFalse || !RexUtil.isNull(pair.e.getValue()))) {
           break trueFalse;
         }
       }
@@ -145,33 +197,76 @@ public class HiveRexUtil {
     return builder.build();
   }
 
-  public static RexNode simplifyAnd(RexBuilder rexBuilder, RexCall e) {
-    final List<RexNode> terms = RelOptUtil.conjunctions(e);
+  public static RexNode simplifyAnd(RexBuilder rexBuilder, RexCall e,
+          boolean unknownAsFalse) {
+    final List<RexNode> terms = new ArrayList<>();
     final List<RexNode> notTerms = new ArrayList<>();
-    final List<RexNode> negatedTerms = new ArrayList<>();
-    final List<RexNode> nullOperands = new ArrayList<>();
-    final List<RexNode> notNullOperands = new ArrayList<>();
-    final Set<RexNode> comparedOperands = new HashSet<>();
+    RelOptUtil.decomposeConjunction(e, terms, notTerms);
+    if (unknownAsFalse) {
+      return simplifyAnd2ForUnknownAsFalse(rexBuilder, terms, notTerms);
+    }
+    return simplifyAnd2(rexBuilder, terms, notTerms);
+  }
+
+  public static RexNode simplifyAnd2(RexBuilder rexBuilder,
+      List<RexNode> terms, List<RexNode> notTerms) {
+    if (terms.contains(rexBuilder.makeLiteral(false))) {
+      return rexBuilder.makeLiteral(false);
+    }
+    if (terms.isEmpty() && notTerms.isEmpty()) {
+      return rexBuilder.makeLiteral(true);
+    }
+    if (terms.size() == 1 && notTerms.isEmpty()) {
+      // Make sure "x OR y OR x" (a single-term conjunction) gets simplified.
+      return simplify(rexBuilder, terms.get(0));
+    }
+    // If one of the not-disjunctions is a disjunction that is wholly
+    // contained in the disjunctions list, the expression is not
+    // satisfiable.
+    //
+    // Example #1. x AND y AND z AND NOT (x AND y)  - not satisfiable
+    // Example #2. x AND y AND NOT (x AND y)        - not satisfiable
+    // Example #3. x AND y AND NOT (x AND y AND z)  - may be satisfiable
+    for (RexNode notDisjunction : notTerms) {
+      final List<RexNode> terms2 = RelOptUtil.conjunctions(notDisjunction);
+      if (terms.containsAll(terms2)) {
+        return rexBuilder.makeLiteral(false);
+      }
+    }
+    // Add the NOT disjunctions back in.
+    for (RexNode notDisjunction : notTerms) {
+      terms.add(
+          simplify(rexBuilder,
+              rexBuilder.makeCall(SqlStdOperatorTable.NOT, notDisjunction)));
+    }
+    return RexUtil.composeConjunction(rexBuilder, terms, false);
+  }
+
+  /** As {@link #simplifyAnd2(RexBuilder, List, List)} but we assume that if the expression returns
+   * UNKNOWN it will be interpreted as FALSE. */
+  public static RexNode simplifyAnd2ForUnknownAsFalse(RexBuilder rexBuilder,
+      List<RexNode> terms, List<RexNode> notTerms) {
+    if (terms.contains(rexBuilder.makeLiteral(false))) {
+      return rexBuilder.makeLiteral(false);
+    }
+    if (terms.isEmpty() && notTerms.isEmpty()) {
+      return rexBuilder.makeLiteral(true);
+    }
+    if (terms.size() == 1 && notTerms.isEmpty()) {
+      // Make sure "x OR y OR x" (a single-term conjunction) gets simplified.
+      return simplify(rexBuilder, terms.get(0), true);
+    }
+    // Try to simplify the expression
+    final Set<String> negatedTerms = new HashSet<>();
+    final Set<String> nullOperands = new HashSet<>();
+    final Set<RexNode> notNullOperands = new LinkedHashSet<>();
+    final Set<String> comparedOperands = new HashSet<>();
     for (int i = 0; i < terms.size(); i++) {
       final RexNode term = terms.get(i);
       if (!HiveCalciteUtil.isDeterministic(term)) {
         continue;
       }
       switch (term.getKind()) {
-      case NOT:
-        notTerms.add(
-            ((RexCall) term).getOperands().get(0));
-        terms.remove(i);
-        --i;
-        break;
-      case LITERAL:
-        if (!RexLiteral.booleanValue(term)) {
-          return term; // false
-        } else {
-          terms.remove(i);
-          --i;
-        }
-        break;
       case EQUALS:
       case NOT_EQUALS:
       case LESS_THAN:
@@ -180,53 +275,48 @@ public class HiveRexUtil {
       case GREATER_THAN_OR_EQUAL:
         RexCall call = (RexCall) term;
         RexNode left = call.getOperands().get(0);
-        comparedOperands.add(left);
+        comparedOperands.add(left.toString());
         // if it is a cast, we include the inner reference
         if (left.getKind() == SqlKind.CAST) {
           RexCall leftCast = (RexCall) left;
-          comparedOperands.add(leftCast.getOperands().get(0));
+          comparedOperands.add(leftCast.getOperands().get(0).toString());
         }
         RexNode right = call.getOperands().get(1);
-        comparedOperands.add(right);
+        comparedOperands.add(right.toString());
         // if it is a cast, we include the inner reference
         if (right.getKind() == SqlKind.CAST) {
           RexCall rightCast = (RexCall) right;
-          comparedOperands.add(rightCast.getOperands().get(0));
+          comparedOperands.add(rightCast.getOperands().get(0).toString());
         }
-        // Assume we have the expression a > 5.
-        // Then we can derive the negated term: NOT(a <= 5).
+        // Assume the expression a > 5 is part of a Filter condition.
+        // Then we can derive the negated term: a <= 5.
         // But as the comparison is string based and thus operands order dependent,
-        // we should also add the inverted negated term: NOT(5 >= a).
+        // we should also add the inverted negated term: 5 >= a.
         // Observe that for creating the inverted term we invert the list of operands.
-        RexCall negatedTerm = negate(rexBuilder, call);
+        RexNode negatedTerm = negate(rexBuilder, call);
         if (negatedTerm != null) {
-          negatedTerms.add(negatedTerm);
-          RexCall invertNegatedTerm = invert(rexBuilder, negatedTerm);
+          negatedTerms.add(negatedTerm.toString());
+          RexNode invertNegatedTerm = invert(rexBuilder, (RexCall) negatedTerm);
           if (invertNegatedTerm != null) {
-            negatedTerms.add(invertNegatedTerm);
+            negatedTerms.add(invertNegatedTerm.toString());
           }
         }
         break;
       case IN:
-        comparedOperands.add(((RexCall) term).operands.get(0));
+        comparedOperands.add(((RexCall) term).operands.get(0).toString());
         break;
       case BETWEEN:
-        comparedOperands.add(((RexCall) term).operands.get(1));
+        comparedOperands.add(((RexCall) term).operands.get(1).toString());
         break;
       case IS_NOT_NULL:
-        notNullOperands.add(
-                ((RexCall) term).getOperands().get(0));
+        notNullOperands.add(((RexCall) term).getOperands().get(0));
         terms.remove(i);
         --i;
         break;
       case IS_NULL:
-        nullOperands.add(
-                ((RexCall) term).getOperands().get(0));
+        nullOperands.add(((RexCall) term).getOperands().get(0).toString());
       }
     }
-    if (terms.isEmpty() && notTerms.isEmpty() && notNullOperands.isEmpty()) {
-      return rexBuilder.makeLiteral(true);
-    }
     // If one column should be null and is in a comparison predicate,
     // it is not satisfiable.
     // Example. IS NULL(x) AND x < 5  - not satisfiable
@@ -237,10 +327,9 @@ public class HiveRexUtil {
     //
     // Example. IS NOT NULL(x) AND x < 5  : x < 5
     for (RexNode operand : notNullOperands) {
-      if (!comparedOperands.contains(operand)) {
+      if (!comparedOperands.contains(operand.toString())) {
         terms.add(
-            rexBuilder.makeCall(
-                SqlStdOperatorTable.IS_NOT_NULL, operand));
+            rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_NULL, operand));
       }
     }
     // If one of the not-disjunctions is a disjunction that is wholly
@@ -253,23 +342,24 @@ public class HiveRexUtil {
     final Set<String> termsSet = new HashSet<String>(
             Lists.transform(terms, HiveCalciteUtil.REX_STR_FN));
     for (RexNode notDisjunction : notTerms) {
-      final Set<String> notSet = new HashSet<String>(
-              Lists.transform(RelOptUtil.conjunctions(notDisjunction), HiveCalciteUtil.REX_STR_FN));
-      if (termsSet.containsAll(notSet)) {
+      if (!HiveCalciteUtil.isDeterministic(notDisjunction)) {
+        continue;
+      }
+      final List<String> terms2Set = Lists.transform(
+              RelOptUtil.conjunctions(notDisjunction), HiveCalciteUtil.REX_STR_FN);
+      if (termsSet.containsAll(terms2Set)) {
         return rexBuilder.makeLiteral(false);
       }
     }
     // Add the NOT disjunctions back in.
     for (RexNode notDisjunction : notTerms) {
       terms.add(
-          rexBuilder.makeCall(
-              SqlStdOperatorTable.NOT, notDisjunction));
+          simplify(rexBuilder,
+              rexBuilder.makeCall(SqlStdOperatorTable.NOT, notDisjunction), true));
     }
-    // The negated terms
-    for (RexNode notDisjunction : negatedTerms) {
-      final Set<String> notSet = new HashSet<String>(
-              Lists.transform(RelOptUtil.conjunctions(notDisjunction), HiveCalciteUtil.REX_STR_FN));
-      if (termsSet.containsAll(notSet)) {
+    // The negated terms: only deterministic expressions
+    for (String negatedTerm : negatedTerms) {
+      if (termsSet.contains(negatedTerm)) {
         return rexBuilder.makeLiteral(false);
       }
     }
@@ -284,11 +374,13 @@ public class HiveRexUtil {
       final RexNode term = terms.get(i);
       switch (term.getKind()) {
       case LITERAL:
-        if (RexLiteral.booleanValue(term)) {
-          return term; // true
-        } else {
-          terms.remove(i);
-          --i;
+        if (!RexLiteral.isNullLiteral(term)) {
+          if (RexLiteral.booleanValue(term)) {
+            return term; // true
+          } else {
+            terms.remove(i);
+            --i;
+          }
         }
       }
     }
@@ -298,21 +390,34 @@ public class HiveRexUtil {
   private static RexCall negate(RexBuilder rexBuilder, RexCall call) {
     switch (call.getKind()) {
       case EQUALS:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.NOT_EQUALS, call.getOperands());
       case NOT_EQUALS:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, call.getOperands());
       case LESS_THAN:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN_OR_EQUAL, call.getOperands());
       case GREATER_THAN:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.LESS_THAN_OR_EQUAL, call.getOperands());
       case LESS_THAN_OR_EQUAL:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN, call.getOperands());
       case GREATER_THAN_OR_EQUAL:
-        return (RexCall) rexBuilder.makeCall(SqlStdOperatorTable.LESS_THAN, call.getOperands());
+        return (RexCall) rexBuilder.makeCall(op(negate(call.getKind())), call.getOperands());
     }
     return null;
   }
 
+  private static SqlKind negate(SqlKind kind) {
+    switch (kind) {
+      case EQUALS:
+        return SqlKind.NOT_EQUALS;
+      case NOT_EQUALS:
+        return SqlKind.EQUALS;
+      case LESS_THAN:
+        return SqlKind.GREATER_THAN_OR_EQUAL;
+      case GREATER_THAN:
+        return SqlKind.LESS_THAN_OR_EQUAL;
+      case LESS_THAN_OR_EQUAL:
+        return SqlKind.GREATER_THAN;
+      case GREATER_THAN_OR_EQUAL:
+        return SqlKind.LESS_THAN;
+    }
+    return kind;
+  }
+
   private static RexCall invert(RexBuilder rexBuilder, RexCall call) {
     switch (call.getKind()) {
       case LESS_THAN:
@@ -330,4 +435,81 @@ public class HiveRexUtil {
     }
     return null;
   }
+
+  private static SqlOperator op(SqlKind kind) {
+    switch (kind) {
+    case IS_FALSE:
+      return SqlStdOperatorTable.IS_FALSE;
+    case IS_TRUE:
+      return SqlStdOperatorTable.IS_TRUE;
+    case IS_UNKNOWN:
+      return SqlStdOperatorTable.IS_UNKNOWN;
+    case IS_NULL:
+      return SqlStdOperatorTable.IS_NULL;
+    case IS_NOT_FALSE:
+      return SqlStdOperatorTable.IS_NOT_FALSE;
+    case IS_NOT_TRUE:
+      return SqlStdOperatorTable.IS_NOT_TRUE;
+    case IS_NOT_NULL:
+      return SqlStdOperatorTable.IS_NOT_NULL;
+    case EQUALS:
+      return SqlStdOperatorTable.EQUALS;
+    case NOT_EQUALS:
+      return SqlStdOperatorTable.NOT_EQUALS;
+    case LESS_THAN:
+      return SqlStdOperatorTable.LESS_THAN;
+    case GREATER_THAN:
+      return SqlStdOperatorTable.GREATER_THAN;
+    case LESS_THAN_OR_EQUAL:
+      return SqlStdOperatorTable.LESS_THAN_OR_EQUAL;
+    case GREATER_THAN_OR_EQUAL:
+      return SqlStdOperatorTable.GREATER_THAN_OR_EQUAL;
+    default:
+      throw new AssertionError(kind);
+    }
+  }
+
+  public static class ExprSimplifier extends RexShuttle {
+    private final RexBuilder rexBuilder;
+    private final boolean unknownAsFalse;
+    private final Map<RexNode,Boolean> unknownAsFalseMap;
+
+    public ExprSimplifier(RexBuilder rexBuilder, boolean unknownAsFalse) {
+      this.rexBuilder = rexBuilder;
+      this.unknownAsFalse = unknownAsFalse;
+      this.unknownAsFalseMap = new HashMap<>();
+    }
+
+    @Override
+    public RexNode visitCall(RexCall call) {
+      Boolean unknownAsFalseCall = unknownAsFalse;
+      if (unknownAsFalseCall) {
+        switch (call.getKind()) {
+        case AND:
+        case CASE:
+          unknownAsFalseCall = this.unknownAsFalseMap.get(call);
+          if (unknownAsFalseCall == null) {
+            // Top operator
+            unknownAsFalseCall = true;
+          }
+          break;
+        default:
+          unknownAsFalseCall = false;
+        }
+        for (RexNode operand : call.operands) {
+          this.unknownAsFalseMap.put(operand, unknownAsFalseCall);
+        }
+      }
+      RexNode node = super.visitCall(call);
+      RexNode simplifiedNode = HiveRexUtil.simplify(rexBuilder, node, unknownAsFalseCall);
+      if (node == simplifiedNode) {
+        return node;
+      }
+      if (simplifiedNode.getType().equals(call.getType())) {
+        return simplifiedNode;
+      }
+      return rexBuilder.makeCast(call.getType(), simplifiedNode, true);
+    }
+  }
+
 }

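Several of the new structures above (comparedOperands, negatedTerms) hold RexNode digests as strings, and since a digest is operand-order dependent, proving "a > 5 AND 5 >= a" unsatisfiable requires keeping both the negated term and its operand-inverted form. A toy illustration with digest-style strings (not real RexNodes):

    import java.util.HashSet;
    import java.util.Set;

    public class NegatedTermSketch {
      public static void main(String[] args) {
        // Digest-style strings for the terms of: a > 5 AND 5 >= a
        Set<String> terms = new HashSet<>();
        terms.add(">(a, 5)");
        terms.add(">=(5, a)");
        // For a > 5 we derive the negated term a <= 5 and, because
        // digests depend on operand order, also its inverted form 5 >= a.
        Set<String> negated = new HashSet<>();
        negated.add("<=(a, 5)");
        negated.add(">=(5, a)");
        for (String n : negated) {
          if (terms.contains(n)) {
            // The conjunction contradicts itself and folds to FALSE.
            System.out.println("unsatisfiable: found " + n);
          }
        }
      }
    }
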
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregatePullUpConstantsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregatePullUpConstantsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregatePullUpConstantsRule.java
new file mode 100644
index 0000000..370c0ec
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregatePullUpConstantsRule.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Aggregate;
+import org.apache.calcite.rel.rules.AggregateProjectPullUpConstantsRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
+
+public class HiveAggregatePullUpConstantsRule extends AggregateProjectPullUpConstantsRule {
+
+  public static final HiveAggregatePullUpConstantsRule INSTANCE =
+          new HiveAggregatePullUpConstantsRule();
+
+  public HiveAggregatePullUpConstantsRule() {
+    super(HiveAggregate.class, RelNode.class,
+            HiveRelFactories.HIVE_BUILDER, "HiveAggregatePullUpConstantsRule");
+  }
+
+  @Override
+  public boolean matches(RelOptRuleCall call) {
+    final Aggregate aggregate = call.rel(0);
+    // Rule cannot be applied if there are GroupingSets
+    if (aggregate.indicator) {
+      return false;
+    }
+    return super.matches(call);
+  }
+
+}

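The new rule only defines INSTANCE; where it gets registered is not part of this diff. A hypothetical sketch of wiring it into a Calcite heuristic planner program, assuming Calcite's HepProgramBuilder API:

    import org.apache.calcite.plan.hep.HepProgram;
    import org.apache.calcite.plan.hep.HepProgramBuilder;
    import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregatePullUpConstantsRule;

    public class RuleWiringSketch {
      public static void main(String[] args) {
        // Hypothetical: apply the new rule in a heuristic pass so that
        // constant group-by keys are pulled above the Aggregate.
        HepProgram program = new HepProgramBuilder()
            .addRuleInstance(HiveAggregatePullUpConstantsRule.INSTANCE)
            .build();
        System.out.println("program built: " + (program != null));
      }
    }
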
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectFilterPullUpConstantsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectFilterPullUpConstantsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectFilterPullUpConstantsRule.java
new file mode 100644
index 0000000..defab09
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveProjectFilterPullUpConstantsRule.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Filter;
+import org.apache.calcite.rel.core.Project;
+import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexShuttle;
+import org.apache.calcite.rex.RexUtil;
+import org.apache.calcite.tools.RelBuilder;
+import org.apache.calcite.tools.RelBuilderFactory;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Planner rule that infers constant expressions from Filter into
+ * a Project operator.
+ */
+public class HiveProjectFilterPullUpConstantsRule extends RelOptRule {
+
+  protected static final Logger LOG = LoggerFactory.getLogger(
+          HiveProjectFilterPullUpConstantsRule.class);
+
+  public static final HiveProjectFilterPullUpConstantsRule INSTANCE =
+          new HiveProjectFilterPullUpConstantsRule(HiveProject.class, HiveFilter.class,
+                  HiveRelFactories.HIVE_BUILDER);
+
+  public HiveProjectFilterPullUpConstantsRule(
+      Class<? extends Project> projectClass,
+      Class<? extends Filter> filterClass,
+      RelBuilderFactory relBuilderFactory) {
+    super(operand(projectClass,
+            operand(filterClass, any())),
+            relBuilderFactory, null);
+  }
+
+  @Override
+  public boolean matches(RelOptRuleCall call) {
+    final Filter filterRel = call.rel(1);
+    RexNode condition = filterRel.getCondition();
+    if (!HiveCalciteUtil.isDeterministic(condition)) {
+      return false;
+    }
+
+    return super.matches(call);
+  }
+
+  public void onMatch(RelOptRuleCall call) {
+    final Project project = call.rel(0);
+    final Filter filter = call.rel(1);
+    final RelBuilder builder = call.builder();
+
+    List<RexNode> projects = project.getChildExps();
+    List<RexNode> newProjects = rewriteProjects(projects, filter.getCondition(), builder);
+    if (newProjects == null) {
+      return;
+    }
+
+    RelNode newProjRel = builder.push(filter)
+            .project(newProjects, project.getRowType().getFieldNames()).build();
+    call.transformTo(newProjRel);
+  }
+
+  // Rewrite projects to replace column references by constants when possible
+  @SuppressWarnings("incomplete-switch")
+  private static List<RexNode> rewriteProjects(List<RexNode> projects,
+          RexNode newPushedCondition, RelBuilder relBuilder) {
+    final List<RexNode> conjunctions = RelOptUtil.conjunctions(newPushedCondition);
+    final Map<String, RexNode> conditions = new HashMap<String, RexNode>();
+    for (RexNode conjunction: conjunctions) {
+      // 1.1. If it is not a RexCall, we continue
+      if (!(conjunction instanceof RexCall)) {
+        continue;
+      }
+      // 1.2. We extract the information that we need
+      RexCall conjCall = (RexCall) conjunction;
+      switch (conjCall.getOperator().getKind()) {
+        case EQUALS:
+          if (!(RexUtil.isConstant(conjCall.operands.get(0))) &&
+                  RexUtil.isConstant(conjCall.operands.get(1))) {
+            conditions.put(conjCall.operands.get(0).toString(), conjCall.operands.get(1));
+          } else if (!(RexUtil.isConstant(conjCall.operands.get(1))) &&
+                  RexUtil.isConstant(conjCall.operands.get(0))) {
+            conditions.put(conjCall.operands.get(1).toString(), conjCall.operands.get(0));
+          }
+          break;
+        case IS_NULL:
+          conditions.put(conjCall.operands.get(0).toString(),
+                  relBuilder.getRexBuilder().makeNullLiteral(
+                          conjCall.operands.get(0).getType().getSqlTypeName()));
+      }
+    }
+
+    RexReplacer replacer = new RexReplacer(relBuilder.getRexBuilder(), conditions);
+    List<RexNode> newProjects = Lists.newArrayList(projects);
+    replacer.mutate(newProjects);
+    if (replacer.replaced) {
+      return newProjects;
+    }
+    return null;
+  }
+
+  protected static class RexReplacer extends RexShuttle {
+    private final RexBuilder rexBuilder;
+    private final Map<String, RexNode> replacements;
+    private boolean replaced;
+
+    RexReplacer(
+        RexBuilder rexBuilder,
+        Map<String, RexNode> replacements) {
+      this.rexBuilder = rexBuilder;
+      this.replacements = replacements;
+      this.replaced = false;
+    }
+
+    @Override public RexNode visitInputRef(RexInputRef inputRef) {
+      RexNode node = visit(inputRef);
+      if (node == null) {
+        return super.visitInputRef(inputRef);
+      }
+      this.replaced = true;
+      return node;
+    }
+
+    @Override public RexNode visitCall(RexCall call) {
+      RexNode node = visit(call);
+      if (node != null) {
+        this.replaced = true;
+        return node;
+      }
+      return super.visitCall(call);
+    }
+
+    private RexNode visit(final RexNode call) {
+      RexNode replacement = replacements.get(call.toString());
+      if (replacement == null) {
+        return null;
+      }
+      if (replacement.getType().equals(call.getType())) {
+        return replacement;
+      }
+      return rexBuilder.makeCast(call.getType(), replacement, true);
+    }
+  }
+}
\ No newline at end of file

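A worked illustration of what rewriteProjects plus RexReplacer accomplish: equality and IS NULL conjuncts from the Filter become a digest-keyed constant map, and matching references in the Project expressions are substituted. Toy strings stand in for RexNodes here:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class PullUpConstantsSketch {
      public static void main(String[] args) {
        // Filter: $0 = 10 AND $2 IS NULL  ->  implied constants
        Map<String, String> conditions = new HashMap<>();
        conditions.put("$0", "10");
        conditions.put("$2", "null");
        // Project: [$0, +($1, 1), $2] rewritten with the constants
        List<String> projects = Arrays.asList("$0", "+($1, 1)", "$2");
        for (String p : projects) {
          System.out.print(conditions.getOrDefault(p, p) + " ");
        }
        // prints: 10 +($1, 1) null
      }
    }
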
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
index 2fe9b75..514ae62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveReduceExpressionsRule.java
@@ -59,11 +59,15 @@ import org.apache.calcite.tools.RelBuilderFactory;
 import org.apache.calcite.util.Pair;
 import org.apache.calcite.util.Stacks;
 import org.apache.calcite.util.Util;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexUtil;
+import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRexUtil.ExprSimplifier;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
@@ -80,6 +84,9 @@ import com.google.common.collect.Lists;
  * </ul>
  */
 public abstract class HiveReduceExpressionsRule extends RelOptRule {
+
+  protected static final Logger LOG = LoggerFactory.getLogger(HiveReduceExpressionsRule.class);
+
   //~ Static fields/initializers ---------------------------------------------
 
   /**
@@ -125,17 +132,23 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
 
     @Override public void onMatch(RelOptRuleCall call) {
       final Filter filter = call.rel(0);
-      final RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
-
-      RexNode newConditionExp = HiveRexUtil.simplify(rexBuilder, filter.getCondition());
-      final List<RexNode> expList = Lists.newArrayList(newConditionExp);
-      boolean reduced = false;
+      final List<RexNode> expList =
+          Lists.newArrayList(filter.getCondition());
+      RexNode newConditionExp;
+      boolean reduced;
       final RelOptPredicateList predicates =
           RelMetadataQuery.instance().getPulledUpPredicates(filter.getInput());
-      if (reduceExpressions(filter, expList, predicates)) {
+      if (reduceExpressions(filter, expList, predicates, true)) {
         assert expList.size() == 1;
         newConditionExp = expList.get(0);
         reduced = true;
+      } else {
+        // No reduction, but let's still test the original
+        // predicate to see if it was already a constant,
+        // in which case we don't need any runtime decision
+        // about filtering.
+        newConditionExp = filter.getCondition();
+        reduced = false;
       }
 
       // Even if no reduction, let's still test the original
@@ -146,8 +159,7 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
       if (newConditionExp.isAlwaysTrue()) {
         call.transformTo(
             filter.getInput());
-      } else if (reduced
-              || !newConditionExp.toString().equals(filter.getCondition().toString())) {
+      } else if (reduced) {
         call.transformTo(call.builder().
             push(filter.getInput()).filter(newConditionExp).build());
       } else {
@@ -169,26 +181,8 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
       super(projectClass, relBuilderFactory, "HiveReduceExpressionsRule(Project)");
     }
 
-    public boolean matches(RelOptRuleCall call) {
-      Project project = call.rel(0);
-      HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class);
-
-      // If this operator has been visited already by the rule,
-      // we do not need to apply the optimization
-      if (registry != null && registry.getVisited(this).contains(project)) {
-        return false;
-      }
-
-      return true;
-    }
-
     @Override public void onMatch(RelOptRuleCall call) {
       Project project = call.rel(0);
-      // Register that we have visited this operator in this rule
-      HiveRulesRegistry registry = call.getPlanner().getContext().unwrap(HiveRulesRegistry.class);
-      if (registry != null) {
-        registry.registerVisited(this, project);
-      }
       final RelOptPredicateList predicates =
           RelMetadataQuery.instance().getPulledUpPredicates(project.getInput());
       final List<RexNode> expList =
@@ -196,9 +190,6 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
       if (reduceExpressions(project, expList, predicates)) {
         RelNode newProject = call.builder().push(project.getInput())
             .project(expList, project.getRowType().getFieldNames()).build();
-        if (registry != null) {
-          registry.registerVisited(this, newProject);
-        }
         call.transformTo(newProject);
 
         // New plan is absolutely better than old plan.
@@ -275,6 +266,43 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
    */
   protected static boolean reduceExpressions(RelNode rel, List<RexNode> expList,
       RelOptPredicateList predicates) {
+    return reduceExpressions(rel, expList, predicates, false);
+  }
+
+  /**
+   * Reduces a list of expressions.
+   *
+   * @param rel     Relational expression
+   * @param expList List of expressions, modified in place
+   * @param predicates Constraints known to hold on input expressions
+   * @param unknownAsFalse Whether UNKNOWN will be treated as FALSE
+   *
+   * @return whether reduction found something to change, and succeeded
+   */
+  protected static boolean reduceExpressions(RelNode rel, List<RexNode> expList,
+      RelOptPredicateList predicates, boolean unknownAsFalse) {
+    RexBuilder rexBuilder = rel.getCluster().getRexBuilder();
+
+    boolean reduced = reduceExpressionsInternal(rel, expList, predicates);
+
+    // Simplify preds in place
+    ExprSimplifier simplifier = new ExprSimplifier(rexBuilder, unknownAsFalse);
+    List<RexNode> expList2 = Lists.newArrayList(expList);
+    simplifier.mutate(expList2);
+    boolean simplified = false;
+    for (int i = 0; i < expList.size(); i++) {
+      if (!expList2.get(i).toString().equals(expList.get(i).toString())) {
+        expList.remove(i);
+        expList.add(i, expList2.get(i));
+        simplified = true;
+      }
+    }
+
+    return reduced || simplified;
+  }
+
+  protected static boolean reduceExpressionsInternal(RelNode rel, List<RexNode> expList,
+      RelOptPredicateList predicates) {
     RexBuilder rexBuilder = rel.getCluster().getRexBuilder();
 
     // Replace predicates on CASE to CASE on predicates.
@@ -284,8 +312,8 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
     final List<RexNode> constExps = Lists.newArrayList();
     List<Boolean> addCasts = Lists.newArrayList();
     final List<RexNode> removableCasts = Lists.newArrayList();
-    final ImmutableMap<RexNode, RexLiteral> constants =
-        predicateConstants(predicates);
+    final ImmutableMap<RexNode, RexNode> constants =
+        predicateConstants(RexNode.class, rexBuilder, predicates);
     findReducibleExps(rel.getCluster().getTypeFactory(), expList, constants,
         constExps, addCasts, removableCasts);
     if (constExps.isEmpty() && removableCasts.isEmpty()) {
@@ -347,6 +375,13 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
     final List<RexNode> reducedValues = Lists.newArrayList();
     executor.reduce(rexBuilder, constExps2, reducedValues);
 
+    // Use RexNode.digest to judge whether each newly generated RexNode
+    // is equivalent to the original one.
+    if (Lists.transform(constExps, HiveCalciteUtil.REX_STR_FN).equals(
+            Lists.transform(reducedValues, HiveCalciteUtil.REX_STR_FN))) {
+      return false;
+    }
+
     // For Project, we have to be sure to preserve the result
     // types, so always cast regardless of the expression type.
     // For other RelNodes like Filter, in general, this isn't necessary,
@@ -384,7 +419,7 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
    * @param removableCasts returns the list of cast expressions where the cast
    *                       can be removed
    */
   protected static void findReducibleExps(RelDataTypeFactory typeFactory,
-      List<RexNode> exps, ImmutableMap<RexNode, RexLiteral> constants,
+      List<RexNode> exps, ImmutableMap<RexNode, RexNode> constants,
       List<RexNode> constExps, List<Boolean> addCasts,
       List<RexNode> removableCasts) {
     ReducibleExprLocator gardener =
@@ -437,18 +472,26 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
   private static <C extends RexNode> void gatherConstraints(Class<C> clazz,
       RexNode predicate, Map<RexNode, C> map, Set<RexNode> excludeSet,
       RexBuilder rexBuilder) {
-    if (predicate.getKind() != SqlKind.EQUALS) {
+    if (predicate.getKind() != SqlKind.EQUALS
+            && predicate.getKind() != SqlKind.IS_NULL) {
       decompose(excludeSet, predicate);
       return;
     }
     final List<RexNode> operands = ((RexCall) predicate).getOperands();
-    if (operands.size() != 2) {
+    if (operands.size() != 2 && predicate.getKind() == SqlKind.EQUALS) {
       decompose(excludeSet, predicate);
       return;
     }
     // if it reaches here, we have either rexNode EQUALS rexNode, or rexNode IS NULL
-    final RexNode left = operands.get(0);
-    final RexNode right = operands.get(1);
+    final RexNode left;
+    final RexNode right;
+    if (predicate.getKind() == SqlKind.EQUALS) {
+      left = operands.get(0);
+      right = operands.get(1);
+    } else {
+      left = operands.get(0);
+      right = rexBuilder.makeNullLiteral(left.getType().getSqlTypeName());
+    }
     // note that literals are immutable too and they can only be compared through
     // values.
     gatherConstraint(clazz, left, right, map, excludeSet, rexBuilder);
@@ -520,33 +563,6 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
       }
     }
   }
-  
-  protected static ImmutableMap<RexNode, RexLiteral> predicateConstants(
-      RelOptPredicateList predicates) {
-    // We cannot use an ImmutableMap.Builder here. If there are multiple entries
-    // with the same key (e.g. "WHERE deptno = 1 AND deptno = 2"), it doesn't
-    // matter which we take, so the latter will replace the former.
-    // The basic idea is to find all the pairs of RexNode = RexLiteral
-    // (1) If 'predicates' contain a non-EQUALS, we bail out.
-    // (2) It is OK if a RexNode is equal to the same RexLiteral several times,
-    // (e.g. "WHERE deptno = 1 AND deptno = 1")
-    // (3) It will return false if there are inconsistent constraints (e.g.
-    // "WHERE deptno = 1 AND deptno = 2")
-    final Map<RexNode, RexLiteral> map = new HashMap<>();
-    final Set<RexNode> excludeSet = new HashSet<>();
-    for (RexNode predicate : predicates.pulledUpPredicates) {
-      gatherConstraints(map, predicate, excludeSet);
-    }
-    final ImmutableMap.Builder<RexNode, RexLiteral> builder =
-        ImmutableMap.builder();
-    for (Map.Entry<RexNode, RexLiteral> entry : map.entrySet()) {
-      RexNode rexNode = entry.getKey();
-      if (!overlap(rexNode, excludeSet)) {
-        builder.put(rexNode, entry.getValue());
-      }
-    }
-    return builder.build();
-  }
 
   private static boolean overlap(RexNode rexNode, Set<RexNode> set) {
     if (rexNode instanceof RexCall) {
@@ -573,46 +589,6 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
     }
   }
 
-  private static void gatherConstraints(Map<RexNode, RexLiteral> map,
-      RexNode predicate, Set<RexNode> excludeSet) {
-    if (predicate.getKind() != SqlKind.EQUALS) {
-      decompose(excludeSet, predicate);
-      return;
-    }
-    final List<RexNode> operands = ((RexCall) predicate).getOperands();
-    if (operands.size() != 2) {
-      decompose(excludeSet, predicate);
-      return;
-    }
-    // if it reaches here, we have rexNode equals rexNode
-    final RexNode left = operands.get(0);
-    final RexNode right = operands.get(1);
-    // note that literals are immutable too and they can only be compared through
-    // values.
-    if (right instanceof RexLiteral && !excludeSet.contains(left)) {
-      RexLiteral existedValue = map.get(left);
-      if (existedValue == null) {
-        map.put(left, (RexLiteral) right);
-      } else {
-        if (!existedValue.getValue().equals(((RexLiteral) right).getValue())) {
-          // we found conflict values.
-          map.remove(left);
-          excludeSet.add(left);
-        }
-      }
-    } else if (left instanceof RexLiteral && !excludeSet.contains(right)) {
-      RexLiteral existedValue = map.get(right);
-      if (existedValue == null) {
-        map.put(right, (RexLiteral) left);
-      } else {
-        if (!existedValue.getValue().equals(((RexLiteral) left).getValue())) {
-          map.remove(right);
-          excludeSet.add(right);
-        }
-      }
-    }
-  }
-
   /** Pushes predicates into a CASE.
    *
    * <p>We have a loose definition of 'predicate': any boolean expression will
@@ -743,7 +719,7 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
 
     private final List<Constancy> stack;
 
-    private final ImmutableMap<RexNode, RexLiteral> constants;
+    private final ImmutableMap<RexNode, RexNode> constants;
 
     private final List<RexNode> constExprs;
 
@@ -754,7 +730,7 @@ public abstract class HiveReduceExpressionsRule extends RelOptRule {
     private final List<SqlOperator> parentCallTypeStack;
 
     ReducibleExprLocator(RelDataTypeFactory typeFactory,
-        ImmutableMap<RexNode, RexLiteral> constants, List<RexNode> constExprs,
+        ImmutableMap<RexNode, RexNode> constants, List<RexNode> constExprs,
         List<Boolean> addCasts, List<RexNode> removableCasts) {
       // go deep
       super(true);
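
The key addition in the hunk above is the digest comparison just after the executor runs: reduceExpressionsInternal may hand back expressions that are textually identical to the originals, and without the guard the rule would keep firing on an unchanged plan. A minimal sketch of the same guard, using plain Java streams in place of Hive's HiveCalciteUtil.REX_STR_FN (the helper class and method names below are hypothetical; RexNode is Calcite's):

  import java.util.List;
  import java.util.stream.Collectors;

  import org.apache.calcite.rex.RexNode;

  class DigestGuardSketch {
    // RexNode.toString() prints the node's digest, so comparing the two
    // string lists is a cheap structural-equivalence test between the
    // original constant expressions and their folded values.
    static boolean foldingChangedAnything(List<RexNode> constExps,
        List<RexNode> reducedValues) {
      List<String> before = constExps.stream()
          .map(RexNode::toString).collect(Collectors.toList());
      List<String> after = reducedValues.stream()
          .map(RexNode::toString).collect(Collectors.toList());
      // Identical digests: the caller should bail out instead of
      // transforming, otherwise the rule re-fires on the same plan.
      return !before.equals(after);
    }
  }

The same hunk also teaches gatherConstraints about IS NULL predicates by mapping "x IS NULL" to a constraint between x and a NULL literal, which is why predicateConstants now returns RexNode values rather than only RexLiteral.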

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
index 3be9b0a..ebfabac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveSortLimitPullUpConstantsRule.java
@@ -92,8 +92,15 @@ public class HiveSortLimitPullUpConstantsRule extends RelOptRule {
       return;
     }
 
-    Map<RexNode, RexNode> constants = HiveReduceExpressionsRule.predicateConstants(
+    Map<RexNode, RexNode> conditionsExtracted = HiveReduceExpressionsRule.predicateConstants(
             RexNode.class, rexBuilder, predicates);
+    Map<RexNode, RexNode> constants = new HashMap<>();
+    for (int i = 0; i < count ; i++) {
+      RexNode expr = rexBuilder.makeInputRef(sort.getInput(), i);
+      if (conditionsExtracted.containsKey(expr)) {
+        constants.put(expr, conditionsExtracted.get(expr));
+      }
+    }
 
     // None of the expressions are constant. Nothing to do.
     if (constants.isEmpty()) {
@@ -102,13 +109,10 @@ public class HiveSortLimitPullUpConstantsRule extends RelOptRule {
 
     if (count == constants.size()) {
       // At least a single item in project is required.
-      final Map<RexNode, RexNode> map = new HashMap<>(constants);
-      map.remove(map.keySet().iterator().next());
-      constants = map;
+      constants.remove(constants.keySet().iterator().next());
     }
 
     // Create expressions for Project operators before and after the Sort
-    boolean atLeastOneConstant = false;
     List<RelDataTypeField> fields = sort.getInput().getRowType().getFieldList();
     List<Pair<RexNode, String>> newChildExprs = new ArrayList<>();
     List<RexNode> topChildExprs = new ArrayList<>();
@@ -117,7 +121,6 @@ public class HiveSortLimitPullUpConstantsRule extends RelOptRule {
       RexNode expr = rexBuilder.makeInputRef(sort.getInput(), i);
       RelDataTypeField field = fields.get(i);
       if (constants.containsKey(expr)) {
-        atLeastOneConstant = true;
         topChildExprs.add(constants.get(expr));
         topChildExprsFields.add(field.getName());
       } else {
@@ -127,11 +130,6 @@ public class HiveSortLimitPullUpConstantsRule extends RelOptRule {
       }
     }
 
-    // No constants were found
-    if (!atLeastOneConstant) {
-      return;
-    }
-
     // Update field collations
     final Mappings.TargetMapping mapping =
             RelOptUtil.permutation(Pair.left(newChildExprs), sort.getInput().getRowType()).inverse();
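
Because predicateConstants now keys constraints by arbitrary RexNode rather than only by column references, the rule above first narrows the extracted map down to input references of the Sort's child, i.e. the columns that a Project placed on top can actually replace. The narrowing step in isolation, as a sketch under the assumption of a standalone helper (class and method names are illustrative; the RexBuilder call is Calcite's real API):

  import java.util.HashMap;
  import java.util.Map;

  import org.apache.calcite.rel.RelNode;
  import org.apache.calcite.rex.RexBuilder;
  import org.apache.calcite.rex.RexNode;

  class InputRefConstantsSketch {
    // Keep only the constraints whose key is an input reference of the
    // child operator; only whole columns can be pulled up as constants.
    static Map<RexNode, RexNode> restrictToInputRefs(RexBuilder rexBuilder,
        RelNode input, int fieldCount, Map<RexNode, RexNode> extracted) {
      Map<RexNode, RexNode> constants = new HashMap<>();
      for (int i = 0; i < fieldCount; i++) {
        // Same containsKey lookup the rule itself performs on the map.
        RexNode ref = rexBuilder.makeInputRef(input, i);
        if (extracted.containsKey(ref)) {
          constants.put(ref, extracted.get(ref));
        }
      }
      return constants;
    }
  }

This also explains why the atLeastOneConstant flag could go: every key in the narrowed map is, by construction, one of the projected input references, so the later loop over the fields is guaranteed to hit at least one constant.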

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java
index 2552f87..f071ddd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveUnionPullUpConstantsRule.java
@@ -80,32 +80,30 @@ public class HiveUnionPullUpConstantsRule extends RelOptRule {
       return;
     }
 
-    Map<RexNode, RexNode> constants = HiveReduceExpressionsRule.predicateConstants(
+    Map<RexNode, RexNode> conditionsExtracted = HiveReduceExpressionsRule.predicateConstants(
             RexNode.class, rexBuilder, predicates);
+    Map<RexNode, RexNode> constants = new HashMap<>();
+    for (int i = 0; i < count ; i++) {
+      RexNode expr = rexBuilder.makeInputRef(union, i);
+      if (conditionsExtracted.containsKey(expr)) {
+        constants.put(expr, conditionsExtracted.get(expr));
+      }
+    }
 
     // None of the expressions are constant. Nothing to do.
     if (constants.isEmpty()) {
       return;
     }
 
-    if (count == constants.size()) {
-      // At least a single item in project is required.
-      final Map<RexNode, RexNode> map = new HashMap<>(constants);
-      map.remove(map.keySet().iterator().next());
-      constants = map;
-    }
-
     // Create expressions for Project operators before and after the Union
-    boolean atLeastOneConstant = false;
-    List<RelDataTypeField> fields = union.getRowType().getFieldList();
     List<Pair<RexNode, String>> newChildExprs = new ArrayList<>();
+    List<RelDataTypeField> fields = union.getRowType().getFieldList();
     List<RexNode> topChildExprs = new ArrayList<>();
     List<String> topChildExprsFields = new ArrayList<>();
     for (int i = 0; i < count ; i++) {
       RexNode expr = rexBuilder.makeInputRef(union, i);
       RelDataTypeField field = fields.get(i);
       if (constants.containsKey(expr)) {
-        atLeastOneConstant = true;
         topChildExprs.add(constants.get(expr));
         topChildExprsFields.add(field.getName());
       } else {
@@ -115,9 +113,10 @@ public class HiveUnionPullUpConstantsRule extends RelOptRule {
       }
     }
 
-    // No constants were found
-    if (!atLeastOneConstant) {
-      return;
+    if (newChildExprs.isEmpty()) {
+      // At least a single item in project is required.
+      newChildExprs.add(Pair.<RexNode,String>of(
+              topChildExprs.get(0), topChildExprsFields.get(0)));
     }
 
     // Update top Project positions
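
The Union variant handles the "all columns constant" corner case the opposite way: instead of dropping one constant from the map up front (as the Sort rule still does), it lets the loop run and then repairs the child Project if it ended up empty, since a Project must emit at least one column. The guard in isolation (a sketch; the helper name is made up, Pair is Calcite's org.apache.calcite.util.Pair):

  import java.util.List;

  import org.apache.calcite.rex.RexNode;
  import org.apache.calcite.util.Pair;

  class KeepOneColumnSketch {
    // If constant pull-up emptied the child expression list, re-project
    // the first constant so the Project below the Union stays non-empty.
    static void ensureNonEmpty(List<Pair<RexNode, String>> newChildExprs,
        List<RexNode> topChildExprs, List<String> topChildExprsFields) {
      if (newChildExprs.isEmpty()) {
        newChildExprs.add(Pair.of(topChildExprs.get(0),
            topChildExprsFields.get(0)));
      }
    }
  }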


[45/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
new file mode 100644
index 0000000..fc8eb1c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/vector_cast_constant.q.out
@@ -0,0 +1,216 @@
+PREHOOK: query: DROP TABLE over1k
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE over1k
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE over1korc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE over1korc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: -- data setup
+CREATE TABLE over1k(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k
+POSTHOOK: query: -- data setup
+CREATE TABLE over1k(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@over1k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@over1k
+PREHOOK: query: CREATE TABLE over1korc(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1korc
+POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1korc
+PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1korc
+POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1korc
+POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: EXPLAIN SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: over1korc
+                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: i (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: avg(50), avg(50.0), avg(50)
+                      keys: _col0 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
+        Reducer 3 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1korc
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1korc
+#### A masked pattern was here ####
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/outer_join_ppr.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.java1.7.out b/ql/src/test/results/clientpositive/outer_join_ppr.q.java1.7.out
deleted file mode 100644
index 1312e53..0000000
--- a/ql/src/test/results/clientpositive/outer_join_ppr.q.java1.7.out
+++ /dev/null
@@ -1,685 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                null sort order: a
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                tag: 0
-                value expressions: _col1 (type: string)
-                auto parallelism: false
-          TableScan
-            alias: b
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), ds (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                null sort order: a
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                tag: 1
-                value expressions: _col1 (type: string), _col2 (type: string)
-                auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-09
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-09
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /src [$hdt$_0:a]
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
-        /srcpart/ds=2008-04-09/hr=11 [$hdt$_1:b]
-        /srcpart/ds=2008-04-09/hr=12 [$hdt$_1:b]
-      Needs Tagging: true
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Outer Join 0 to 1
-          filter mappings:
-            1 [0, 1]
-          filter predicates:
-            0 
-            1 {(VALUE._col1 = '2008-04-08')}
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0) and (UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean)
-            Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
-              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  properties:
-                    columns _col0,_col1,_col2,_col3
-                    columns.types string:string:string:string
-                    escape.delim \
-                    hive.serialization.extend.additional.nesting.levels true
-                    serialization.escape.crlf true
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              TotalFiles: 1
-              GatherStats: false
-              MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19
-PREHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  null sort order: a
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                  tag: 0
-                  value expressions: _col1 (type: string)
-                  auto parallelism: false
-          TableScan
-            alias: b
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  null sort order: a
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                  tag: 1
-                  value expressions: _col1 (type: string)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /src [$hdt$_0:a]
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
-      Needs Tagging: true
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Right Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean)
-            Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              GlobalTableId: 0
-#### A masked pattern was here ####
-              NumFilesPerFileSink: 1
-              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  properties:
-                    columns _col0,_col1,_col2,_col3
-                    columns.types string:string:string:string
-                    escape.delim \
-                    hive.serialization.extend.additional.nesting.levels true
-                    serialization.escape.crlf true
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              TotalFiles: 1
-              GatherStats: false
-              MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/outer_join_ppr.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.java1.8.out b/ql/src/test/results/clientpositive/outer_join_ppr.q.java1.8.out
deleted file mode 100644
index b9c1a66..0000000
--- a/ql/src/test/results/clientpositive/outer_join_ppr.q.java1.8.out
+++ /dev/null
@@ -1,855 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_FULLOUTERJOIN
-         TOK_TABREF
-            TOK_TABNAME
-               src
-            a
-         TOK_TABREF
-            TOK_TABNAME
-               srcpart
-            b
-         AND
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     a
-                  key
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  key
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  ds
-               '2008-04-08'
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               value
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               value
-      TOK_WHERE
-         AND
-            AND
-               AND
-                  >
-                     .
-                        TOK_TABLE_OR_COL
-                           a
-                        key
-                     10
-                  <
-                     .
-                        TOK_TABLE_OR_COL
-                           a
-                        key
-                     20
-               >
-                  .
-                     TOK_TABLE_OR_COL
-                        b
-                     key
-                  15
-            <
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  key
-               25
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), ds (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                tag: 0
-                value expressions: _col1 (type: string), _col2 (type: string)
-                auto parallelism: false
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                tag: 1
-                value expressions: _col1 (type: string)
-                auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-09
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-09
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /src [$hdt$_0:$hdt$_1:a]
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:b]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:b]
-        /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:$hdt$_0:b]
-        /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:$hdt$_0:b]
-      Needs Tagging: true
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Outer Join 0 to 1
-          filter mappings:
-            0 [1, 1]
-          filter predicates:
-            0 {(VALUE._col1 = '2008-04-08')}
-            1 
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((((UDFToDouble(_col3) > 10.0) and (UDFToDouble(_col3) < 20.0)) and (UDFToDouble(_col0) > 15.0)) and (UDFToDouble(_col0) < 25.0)) (type: boolean)
-            Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col3 (type: string), _col4 (type: string), _col0 (type: string), _col1 (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col0,_col1,_col2,_col3
-                      columns.types string:string:string:string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19
-PREHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_FULLOUTERJOIN
-         TOK_TABREF
-            TOK_TABNAME
-               src
-            a
-         TOK_TABREF
-            TOK_TABNAME
-               srcpart
-            b
-         =
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               value
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               value
-      TOK_WHERE
-         AND
-            AND
-               AND
-                  AND
-                     >
-                        .
-                           TOK_TABLE_OR_COL
-                              a
-                           key
-                        10
-                     <
-                        .
-                           TOK_TABLE_OR_COL
-                              a
-                           key
-                        20
-                  >
-                     .
-                        TOK_TABLE_OR_COL
-                           b
-                        key
-                     15
-               <
-                  .
-                     TOK_TABLE_OR_COL
-                        b
-                     key
-                  25
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  ds
-               '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                  tag: 0
-                  value expressions: _col1 (type: string)
-                  auto parallelism: false
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                  tag: 1
-                  value expressions: _col1 (type: string)
-                  auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /src [$hdt$_0:$hdt$_1:a]
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:b]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:b]
-      Needs Tagging: true
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join 0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col3, _col4
-          Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((UDFToDouble(_col3) > 10.0) and (UDFToDouble(_col3) < 20.0)) (type: boolean)
-            Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col3 (type: string), _col4 (type: string), _col0 (type: string), _col1 (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    properties:
-                      columns _col0,_col1,_col2,_col3
-                      columns.types string:string:string:string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19


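The pair of plans removed above exercises Hive's predicate pushdown (PPD) for outer joins. When the partition predicate b.ds = '2008-04-08' sits in the ON clause of the FULL OUTER JOIN, it cannot prune partitions: all four srcpart partitions are scanned, and the predicate survives only as a residual "filter predicates" entry on the Join Operator. When the same predicate moves to the WHERE clause, it is null-rejecting for the b side, so the optimizer prunes the scan down to the two ds=2008-04-08 partitions and narrows the join (to a one-sided outer join in these plans). As a minimal sketch, assuming the standard src and srcpart test tables: since the WHERE clause also rejects NULLs on the a side, the second query is semantically equivalent to a plain inner join:

 SELECT a.key, a.value, b.key, b.value
 FROM src a
 JOIN srcpart b ON (a.key = b.key)    -- FULL OUTER reduces: WHERE rejects NULLs on both sides
 WHERE a.key > 10 AND a.key < 20      -- null-rejecting on a
   AND b.key > 15 AND b.key < 25      -- null-rejecting on b
   AND b.ds = '2008-04-08';           -- partition predicate, now prunable at scan time

By null-rejection semantics this form returns the same 12 rows listed above (keys 17, 18, 19), while allowing the partition pruner to skip the ds=2008-04-09 partitions entirely.
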
[42/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
deleted file mode 100644
index 68943e1..0000000
--- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.7.out
+++ /dev/null
@@ -1,709 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      null sort order: a
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      tag: 0
-                      value expressions: _col1 (type: string)
-                      auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [a]
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string), ds (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      null sort order: a
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      tag: 1
-                      value expressions: _col1 (type: string), _col2 (type: string)
-                      auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-09
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-09
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-            Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [b]
-              /srcpart/ds=2008-04-08/hr=12 [b]
-              /srcpart/ds=2008-04-09/hr=11 [b]
-              /srcpart/ds=2008-04-09/hr=12 [b]
-        Reducer 2 
-            Needs Tagging: true
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Outer Join 0 to 1
-                filter mappings:
-                  1 [0, 1]
-                filter predicates:
-                  0 
-                  1 {(VALUE._col1 = '2008-04-08')}
-                keys:
-                  0 _col0 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  isSamplingPred: false
-                  predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0) and (UDFToDouble(_col2) > 15.0) and (UDFToDouble(_col2) < 25.0)) (type: boolean)
-                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 0
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
-                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        properties:
-                          columns _col0,_col1,_col2,_col3
-                          columns.types string:string:string:string
-                          escape.delim \
-                          hive.serialization.extend.additional.nesting.levels true
-                          serialization.escape.crlf true
-                          serialization.format 1
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    TotalFiles: 1
-                    GatherStats: false
-                    MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19
-PREHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Filter Operator
-                    isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        null sort order: a
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                        tag: 0
-                        value expressions: _col1 (type: string)
-                        auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [a]
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Filter Operator
-                    isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        null sort order: a
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                        tag: 1
-                        value expressions: _col1 (type: string)
-                        auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-            Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [b]
-              /srcpart/ds=2008-04-08/hr=12 [b]
-        Reducer 2 
-            Needs Tagging: true
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Right Outer Join0 to 1
-                keys:
-                  0 _col0 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  isSamplingPred: false
-                  predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean)
-                  Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    GlobalTableId: 0
-#### A masked pattern was here ####
-                    NumFilesPerFileSink: 1
-                    Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        properties:
-                          columns _col0,_col1,_col2,_col3
-                          columns.types string:string:string:string
-                          escape.delim \
-                          hive.serialization.extend.additional.nesting.levels true
-                          serialization.escape.crlf true
-                          serialization.format 1
-                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    TotalFiles: 1
-                    GatherStats: false
-                    MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19
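
The plan in the hunk above also documents the null-rejection reasoning the optimizer relies on: a predicate such as b.key > 15 evaluates to NULL (and is therefore filtered out) on every row the FULL OUTER JOIN pads with NULLs on the b side, so unmatched rows can never survive the WHERE clause. A minimal HiveQL sketch of that equivalence, reusing the src/srcpart test tables (illustrative only, not part of the committed diff):

  -- Every WHERE predicate below is NULL on a NULL-padded row, so the
  -- FULL OUTER JOIN form...
  SELECT a.key, a.value, b.key, b.value
  FROM src a
  FULL OUTER JOIN srcpart b ON (a.key = b.key)
  WHERE a.key > 10 AND a.key < 20
    AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08';

  -- ...returns exactly the rows of the corresponding inner join, which is
  -- why the b-side filters can be pushed below the join in the plan above.
  SELECT a.key, a.value, b.key, b.value
  FROM src a
  JOIN srcpart b ON (a.key = b.key)
  WHERE a.key > 10 AND a.key < 20
    AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08';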

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.8.out b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.8.out
deleted file mode 100644
index c3454ee..0000000
--- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.java1.8.out
+++ /dev/null
@@ -1,879 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_FULLOUTERJOIN
-         TOK_TABREF
-            TOK_TABNAME
-               src
-            a
-         TOK_TABREF
-            TOK_TABNAME
-               srcpart
-            b
-         AND
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     a
-                  key
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  key
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  ds
-               '2008-04-08'
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               value
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               value
-      TOK_WHERE
-         AND
-            AND
-               AND
-                  >
-                     .
-                        TOK_TABLE_OR_COL
-                           a
-                        key
-                     10
-                  <
-                     .
-                        TOK_TABLE_OR_COL
-                           a
-                        key
-                     20
-               >
-                  .
-                     TOK_TABLE_OR_COL
-                        b
-                     key
-                  15
-            <
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  key
-               25
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string), ds (type: string)
-                    outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      tag: 0
-                      value expressions: _col1 (type: string), _col2 (type: string)
-                      auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-09
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-09
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-            Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [b]
-              /srcpart/ds=2008-04-08/hr=12 [b]
-              /srcpart/ds=2008-04-09/hr=11 [b]
-              /srcpart/ds=2008-04-09/hr=12 [b]
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      tag: 1
-                      value expressions: _col1 (type: string)
-                      auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE true
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [a]
-        Reducer 2 
-            Needs Tagging: true
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Outer Join 0 to 1
-                filter mappings:
-                  0 [1, 1]
-                filter predicates:
-                  0 {(VALUE._col1 = '2008-04-08')}
-                  1 
-                keys:
-                  0 _col0 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col3, _col4
-                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  isSamplingPred: false
-                  predicate: ((((UDFToDouble(_col3) > 10.0) and (UDFToDouble(_col3) < 20.0)) and (UDFToDouble(_col0) > 15.0)) and (UDFToDouble(_col0) < 25.0)) (type: boolean)
-                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col3 (type: string), _col4 (type: string), _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          properties:
-                            columns _col0,_col1,_col2,_col3
-                            columns.types string:string:string:string
-                            escape.delim \
-                            hive.serialization.extend.additional.nesting.levels true
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      TotalFiles: 1
-                      GatherStats: false
-                      MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key AND b.ds = '2008-04-08')
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19
-PREHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN EXTENDED
- FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_FULLOUTERJOIN
-         TOK_TABREF
-            TOK_TABNAME
-               src
-            a
-         TOK_TABREF
-            TOK_TABNAME
-               srcpart
-            b
-         =
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  a
-               value
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               key
-         TOK_SELEXPR
-            .
-               TOK_TABLE_OR_COL
-                  b
-               value
-      TOK_WHERE
-         AND
-            AND
-               AND
-                  AND
-                     >
-                        .
-                           TOK_TABLE_OR_COL
-                              a
-                           key
-                        10
-                     <
-                        .
-                           TOK_TABLE_OR_COL
-                              a
-                           key
-                        20
-                  >
-                     .
-                        TOK_TABLE_OR_COL
-                           b
-                        key
-                     15
-               <
-                  .
-                     TOK_TABLE_OR_COL
-                        b
-                     key
-                  25
-            =
-               .
-                  TOK_TABLE_OR_COL
-                     b
-                  ds
-               '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-      Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 3 (PARTITION-LEVEL SORT, 2)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: b
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Filter Operator
-                    isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
-                        tag: 0
-                        value expressions: _col1 (type: string)
-                        auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-            Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [b]
-              /srcpart/ds=2008-04-08/hr=12 [b]
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: a
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Filter Operator
-                    isSamplingPred: false
-                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
-                        tag: 1
-                        value expressions: _col1 (type: string)
-                        auto parallelism: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE true
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [a]
-        Reducer 2 
-            Needs Tagging: true
-            Reduce Operator Tree:
-              Join Operator
-                condition map:
-                     Left Outer Join0 to 1
-                keys:
-                  0 _col0 (type: string)
-                  1 _col0 (type: string)
-                outputColumnNames: _col0, _col1, _col3, _col4
-                Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  isSamplingPred: false
-                  predicate: ((UDFToDouble(_col3) > 10.0) and (UDFToDouble(_col3) < 20.0)) (type: boolean)
-                  Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col3 (type: string), _col4 (type: string), _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 0
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          properties:
-                            columns _col0,_col1,_col2,_col3
-                            columns.types string:string:string:string
-                            escape.delim \
-                            hive.serialization.extend.additional.nesting.levels true
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      TotalFiles: 1
-                      GatherStats: false
-                      MultiFileSpray: false
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: FROM 
-  src a
- FULL OUTER JOIN 
-  srcpart b 
- ON (a.key = b.key)
- SELECT a.key, a.value, b.key, b.value
- WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-17	val_17	17	val_17
-17	val_17	17	val_17
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-18	val_18	18	val_18
-19	val_19	19	val_19
-19	val_19	19	val_19
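
One detail worth calling out in the plan above: Map 3 scans src (alias a) with the filter key > 15 AND key < 25 even though the query only states that range on b.key; the equi-join condition a.key = b.key lets Hive propagate the bound to the other input. Sketched as a hand-rewritten query (illustrative only, and equivalent here because the null-rejecting WHERE clause already collapses the FULL OUTER JOIN, as the Left Outer Join in Reducer 2 shows):

  SELECT a.key, a.value, b.key, b.value
  FROM (SELECT key, value FROM src
         WHERE key > 15 AND key < 25) a
  JOIN (SELECT key, value FROM srcpart
         WHERE key > 15 AND key < 25 AND ds = '2008-04-08') b
    ON (a.key = b.key)
  WHERE a.key > 10 AND a.key < 20;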


[43/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
deleted file mode 100644
index 12f41eb..0000000
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.java1.8.out
+++ /dev/null
@@ -1,280 +0,0 @@
-PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key) on ('484','51','103')
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            src
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 1
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Static Partition Specification: ds=2008-04-08/hr=11/
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                          properties:
-                            bucket_count -1
-                            columns key,value
-                            columns.comments 
-                            columns.types string:string
-#### A masked pattern was here ####
-                            name default.list_bucketing_static_part
-                            partition_columns ds/hr
-                            partition_columns.types string:string
-                            serialization.ddl struct list_bucketing_static_part { string key, string value}
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                          serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                          name: default.list_bucketing_static_part
-                      TotalFiles: 1
-                      GatherStats: true
-                      MultiFileSpray: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: src
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.src
-                    numFiles 1
-                    numRows 500
-                    rawDataSize 5312
-                    serialization.ddl struct src { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      COLUMN_STATS_ACCURATE true
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.src
-                      numFiles 1
-                      numRows 500
-                      rawDataSize 5312
-                      serialization.ddl struct src { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      totalSize 5812
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src
-                  name: default.src
-            Truncated Path -> Alias:
-              /src [src]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from src
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	4                   
-	numRows             	500                 
-	rawDataSize         	4812                
-	totalSize           	5520                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key]               	 
-Skewed Values:      	[[484], [51], [103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
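
The "Skewed Value to Truncated Path" line above shows what STORED AS DIRECTORIES buys: rows whose key matches a listed skew value ('484', '51', '103') land in a dedicated subdirectory of the partition, with everything else under HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME. A sketch of the point lookup this layout is designed to speed up (illustrative only; assumes list-bucketing pruning is enabled via hive.optimize.listbucketing):

  -- Rows with key = '484' live under .../ds=2008-04-08/hr=11/key=484, so the
  -- scan can be pruned to that subdirectory instead of the whole partition.
  SELECT key, value
  FROM list_bucketing_static_part
  WHERE ds = '2008-04-08' AND hr = '11' AND key = '484';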

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
new file mode 100644
index 0000000..9eca85a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out
@@ -0,0 +1,250 @@
+PREHOOK: query: -- run this test case in minimr to ensure it works in cluster
+
+-- list bucketing DML: static partition. multiple skewed columns.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 000000_0
+-- 99 000001_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 000000_0
+-- 87 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key) on ('484','51','103')
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster
+
+-- list bucketing DML: static partition. multiple skewed columns.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 000000_0
+-- 99 000001_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 000000_0
+-- 87 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key) on ('484','51','103')
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from src
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from src
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  GatherStats: false
+                  Select Operator
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      GlobalTableId: 1
+#### A masked pattern was here ####
+                      NumFilesPerFileSink: 1
+                      Static Partition Specification: ds=2008-04-08/hr=11/
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                      table:
+                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                          properties:
+                            bucket_count -1
+                            columns key,value
+                            columns.comments 
+                            columns.types string:string
+#### A masked pattern was here ####
+                            name default.list_bucketing_static_part
+                            partition_columns ds/hr
+                            partition_columns.types string:string
+                            serialization.ddl struct list_bucketing_static_part { string key, string value}
+                            serialization.format 1
+                            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                          serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                          name: default.list_bucketing_static_part
+                      TotalFiles: 1
+                      GatherStats: true
+                      MultiFileSpray: false
+            Path -> Alias:
+#### A masked pattern was here ####
+            Path -> Partition:
+#### A masked pattern was here ####
+                Partition
+                  base file name: src
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                    bucket_count -1
+                    columns key,value
+                    columns.comments 'default','default'
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.src
+                    numFiles 1
+                    numRows 500
+                    rawDataSize 5312
+                    serialization.ddl struct src { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 5812
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    properties:
+                      COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 'default','default'
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.src
+                      numFiles 1
+                      numRows 500
+                      rawDataSize 5312
+                      serialization.ddl struct src { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      totalSize 5812
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.src
+                  name: default.src
+            Truncated Path -> Alias:
+              /src [src]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_static_part
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_static_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	4812                
+	totalSize           	5520                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key]               	 
+Skewed Values:      	[[484], [51], [103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [51]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=51, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   

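The new spark golden file above exercises list bucketing DML on a static partition: because the table is declared SKEWED BY (key) ON ('484','51','103') STORED AS DIRECTORIES, rows for each listed skew value land in a dedicated subdirectory, as the "Skewed Value to Truncated Path" mapping at the end of the output shows. A point lookup on a skewed key can then be served from a single subdirectory rather than the whole partition. A minimal sketch of such a query, following the pattern used by the related list_bucket_dml_2 test below (the pruning behavior is the intent of the feature, not something this particular .q file asserts):

    -- key=484 has its own subdirectory
    -- (.../ds=2008-04-08/hr=11/key=484), so this scan can be
    -- restricted to one directory of the partition:
    SELECT * FROM list_bucketing_static_part
    WHERE ds = '2008-04-08' AND hr = '11' AND key = '484';
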
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out
deleted file mode 100644
index d8da70c..0000000
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.7.out
+++ /dev/null
@@ -1,591 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: srcpart
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 1
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Static Partition Specification: ds=2008-04-08/hr=11/
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                          properties:
-                            bucket_count -1
-                            columns key,value
-                            columns.comments 
-                            columns.types string:string
-#### A masked pattern was here ####
-                            name default.list_bucketing_static_part
-                            partition_columns ds/hr
-                            partition_columns.types string:string
-                            serialization.ddl struct list_bucketing_static_part { string key, string value}
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                          serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                          name: default.list_bucketing_static_part
-                      TotalFiles: 1
-                      GatherStats: true
-                      MultiFileSpray: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-            Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [srcpart]
-              /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	6                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10898               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 6
-              numRows 1000
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9624
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10898
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_static_part
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
--- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
--- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	12
-51	val_51	2008-04-08	12
-PREHOOK: query: select * from list_bucketing_static_part where key = '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where key = '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
-select count(1) from srcpart where ds = '2008-04-08' and key < '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
-select count(1) from srcpart where ds = '2008-04-08' and key < '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-910
-PREHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-910
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-914
-PREHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-914
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-86
-PREHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-86
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-90
-PREHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-90
-PREHOOK: query: -- clean up
-drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part

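This deletion and the one below retire the JDK-specific golden files for list_bucket_dml_2.q, which carried the JAVA_VERSION_SPECIFIC_OUTPUT marker. The two variants diverge mainly in presentation detail rather than query results: the java1.8 file additionally prints the ABSTRACT SYNTAX TREE section of the extended explain, and map-valued fields iterate in a JDK-dependent order, e.g. "Skewed Value to Truncated Path" lists [103, val_103] before [484, val_484] in the 1.7 output but after it in the 1.8 output. The statement producing that map appears verbatim in both files:

    -- Emits the 'Skewed Value to Truncated Path' mapping whose
    -- iteration order differs between the JDK 7 and JDK 8 golden files:
    DESC FORMATTED list_bucketing_static_part PARTITION (ds='2008-04-08', hr='11');
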
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out
deleted file mode 100644
index 23dc6a3..0000000
--- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.java1.8.out
+++ /dev/null
@@ -1,663 +0,0 @@
-PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- list bucketing DML: static partition. multiple skewed columns.
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
---  5263 000000_0
---  5263 000001_0
--- ds=2008-04-08/hr=11/key=103/value=val_103:
--- 99 000000_0
--- 99 000001_0
--- ds=2008-04-08/hr=11/key=484/value=val_484:
--- 87 000000_0
--- 87 000001_0
-
--- create a skewed table
-create table list_bucketing_static_part (key String, value String) 
-    partitioned by (ds String, hr String) 
-    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
-    stored as DIRECTORIES
-    STORED AS RCFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_static_part
-PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
-explain extended
-insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_static_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-                  '11'
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: srcpart
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  GatherStats: false
-                  Select Operator
-                    expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      GlobalTableId: 1
-#### A masked pattern was here ####
-                      NumFilesPerFileSink: 1
-                      Static Partition Specification: ds=2008-04-08/hr=11/
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                      table:
-                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                          properties:
-                            bucket_count -1
-                            columns key,value
-                            columns.comments 
-                            columns.types string:string
-#### A masked pattern was here ####
-                            name default.list_bucketing_static_part
-                            partition_columns ds/hr
-                            partition_columns.types string:string
-                            serialization.ddl struct list_bucketing_static_part { string key, string value}
-                            serialization.format 1
-                            serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-                          serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                          name: default.list_bucketing_static_part
-                      TotalFiles: 1
-                      GatherStats: true
-                      MultiFileSpray: false
-            Path -> Alias:
-#### A masked pattern was here ####
-            Path -> Partition:
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=11
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 11
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-#### A masked pattern was here ####
-                Partition
-                  base file name: hr=12
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  partition values:
-                    ds 2008-04-08
-                    hr 12
-                  properties:
-                    COLUMN_STATS_ACCURATE true
-                    bucket_count -1
-                    columns key,value
-                    columns.comments 'default','default'
-                    columns.types string:string
-#### A masked pattern was here ####
-                    name default.srcpart
-                    numFiles 1
-                    numRows 500
-                    partition_columns ds/hr
-                    partition_columns.types string:string
-                    rawDataSize 5312
-                    serialization.ddl struct srcpart { string key, string value}
-                    serialization.format 1
-                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    totalSize 5812
-#### A masked pattern was here ####
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 'default','default'
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.srcpart
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct srcpart { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.srcpart
-                  name: default.srcpart
-            Truncated Path -> Alias:
-              /srcpart/ds=2008-04-08/hr=11 [srcpart]
-              /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 11
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
-select key, value from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-PREHOOK: type: SHOWPARTITIONS
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: -- check DML result
-show partitions list_bucketing_static_part
-POSTHOOK: type: SHOWPARTITIONS
-POSTHOOK: Input: default@list_bucketing_static_part
-ds=2008-04-08/hr=11
-PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_static_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	6                   
-	numRows             	1000                
-	rawDataSize         	9624                
-	totalSize           	10898               
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
-InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(*) from list_bucketing_static_part
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from list_bucketing_static_part
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-1000
-PREHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_static_part
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_ALLCOLREF
-      TOK_WHERE
-         and
-            and
-               and
-                  =
-                     TOK_TABLE_OR_COL
-                        ds
-                     '2008-04-08'
-                  =
-                     TOK_TABLE_OR_COL
-                        hr
-                     '11'
-               =
-                  TOK_TABLE_OR_COL
-                     key
-                  '484'
-            =
-               TOK_TABLE_OR_COL
-                  value
-               'val_484'
-
-
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_static_part
-              numFiles 6
-              numRows 1000
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 9624
-              serialization.ddl struct list_bucketing_static_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              totalSize 10898
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-          
-              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_static_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_static_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-              name: default.list_bucketing_static_part
-            name: default.list_bucketing_static_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_static_part
-          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-484	val_484	2008-04-08	11
-484	val_484	2008-04-08	12
-PREHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
--- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none
--- but query should succeed for 51 or 51 and val_14
-select * from srcpart where ds = '2008-04-08' and key = '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	12
-51	val_51	2008-04-08	12
-PREHOOK: query: select * from list_bucketing_static_part where key = '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where key = '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-51	val_51	2008-04-08	11
-PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' and value = 'val_14'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-PREHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select * from list_bucketing_static_part where key = '51' and value = 'val_14'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-PREHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
-select count(1) from srcpart where ds = '2008-04-08' and key < '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning
-select count(1) from srcpart where ds = '2008-04-08' and key < '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-910
-PREHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key < '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-910
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key <= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-914
-PREHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key <= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-914
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key > '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-86
-PREHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key > '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-86
-PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key >= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-90
-PREHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_static_part where key >= '51'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
-#### A masked pattern was here ####
-90
-PREHOOK: query: -- clean up
-drop table list_bucketing_static_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_static_part
-PREHOOK: Output: default@list_bucketing_static_part
-POSTHOOK: query: -- clean up
-drop table list_bucketing_static_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_static_part
-POSTHOOK: Output: default@list_bucketing_static_part
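
For context on the metadata in the deleted output above (an illustrative reconstruction, not part of this commit): "Skewed Columns", "Skewed Values", "Stored As SubDirectories: Yes" and the "Skewed Value to Truncated Path" map are what Hive records for a list-bucketed table. Sketched from that describe output, the DDL shape that produces such a partition is roughly:

CREATE TABLE list_bucketing_static_part (key STRING, value STRING)
PARTITIONED BY (ds STRING, hr STRING)
SKEWED BY (key, value) ON (('484','val_484'), ('51','val_14'), ('103','val_103'))
STORED AS DIRECTORIES
STORED AS RCFILE;

Rows matching a listed skewed value are written into their own subdirectory (e.g. key=484/value=val_484), which is what lets the point lookup on key = '484' and value = 'val_484' prune to a single path, while, as the test's own comments note, the range predicates (<, <=, >, >=) still work but do not benefit from pruning.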

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out
index 3ee9b5a..c83c02e 100644
Binary files a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out and b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out differ


[36/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/subquery_notin_having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.out
new file mode 100644
index 0000000..c32bf25
--- /dev/null
+++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.out
@@ -0,0 +1,764 @@
+Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: -- non agg, non corr
+
+explain
+select key, count(*) 
+from src 
+group by key
+having key not in  
+  ( select key  from src s1 
+    where s1.key > '12'
+  )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- non agg, non corr
+
+explain
+select key, count(*) 
+from src 
+group by key
+having key not in  
+  ( select key  from src s1 
+    where s1.key > '12'
+  )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-4
+  Stage-3 depends on stages: Stage-2
+  Stage-4 is a root stage
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string), _col1 (type: bigint)
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: bigint)
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key > '12') (type: boolean)
+              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string)
+                outputColumnNames: _col0
+                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col3
+          Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: _col3 is null (type: boolean)
+            Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: bigint)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 151 Data size: 1606 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE
+              Filter Operator
+                predicate: false (type: boolean)
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Filter Operator
+            predicate: (_col0 = 0) (type: boolean)
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: -- non agg, corr
+explain
+select b.p_mfgr, min(p_retailprice) 
+from part b 
+group by b.p_mfgr
+having b.p_mfgr not in 
+  (select p_mfgr 
+  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
+  where min(p_retailprice) = l and r - l > 600
+  )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- non agg, corr
+explain
+select b.p_mfgr, min(p_retailprice) 
+from part b 
+group by b.p_mfgr
+having b.p_mfgr not in 
+  (select p_mfgr 
+  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
+  where min(p_retailprice) = l and r - l > 600
+  )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-5
+  Stage-3 depends on stages: Stage-2, Stage-6
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-6 is a root stage
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: p_mfgr (type: string), p_retailprice (type: double)
+              outputColumnNames: p_mfgr, p_retailprice
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: min(p_retailprice)
+                keys: p_mfgr (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string), _col1 (type: double)
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string), _col1 (type: double)
+              sort order: ++
+              Map-reduce partition columns: _col0 (type: string), _col1 (type: double)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string), _col1 (type: double)
+              sort order: ++
+              Map-reduce partition columns: _col0 (type: string), _col1 (type: double)
+              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          keys:
+            0 _col0 (type: string), _col1 (type: double)
+            1 _col0 (type: string), _col1 (type: double)
+          outputColumnNames: _col0, _col1, _col3
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: _col3 is null (type: boolean)
+            Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: double)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: p_mfgr (type: string), p_retailprice (type: double)
+              outputColumnNames: p_mfgr, p_retailprice
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: min(p_retailprice), max(p_retailprice)
+                keys: p_mfgr (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: double), _col2 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0), max(VALUE._col1)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: (((_col2 - _col1) > 600.0) and (_col0 is null or _col1 is null)) (type: boolean)
+            Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count()
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: (_col0 = 0) (type: boolean)
+            Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: p_mfgr (type: string), p_retailprice (type: double)
+              outputColumnNames: p_mfgr, p_retailprice
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: min(p_retailprice), max(p_retailprice)
+                keys: p_mfgr (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: double), _col2 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0), max(VALUE._col1)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: ((_col2 - _col1) > 600.0) (type: boolean)
+            Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: double)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
+from part b 
+group by b.p_mfgr
+having b.p_mfgr not in 
+  (select p_mfgr 
+  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
+  where min(p_retailprice) = l and r - l > 600
+  )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select b.p_mfgr, min(p_retailprice) 
+from part b 
+group by b.p_mfgr
+having b.p_mfgr not in 
+  (select p_mfgr 
+  from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a 
+  where min(p_retailprice) = l and r - l > 600
+  )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	1173.15
+Manufacturer#2	1690.68
+Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: -- agg, non corr
+explain
+select b.p_mfgr, min(p_retailprice) 
+from part b 
+group by b.p_mfgr
+having b.p_mfgr not in 
+  (select p_mfgr 
+  from part a
+  group by p_mfgr
+  having max(p_retailprice) - min(p_retailprice) > 600
+  )
+PREHOOK: type: QUERY
+POSTHOOK: query: -- agg, non corr
+explain
+select b.p_mfgr, min(p_retailprice) 
+from part b 
+group by b.p_mfgr
+having b.p_mfgr not in 
+  (select p_mfgr 
+  from part a
+  group by p_mfgr
+  having max(p_retailprice) - min(p_retailprice) > 600
+  )
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-5
+  Stage-3 depends on stages: Stage-2, Stage-6
+  Stage-4 is a root stage
+  Stage-5 depends on stages: Stage-4
+  Stage-6 is a root stage
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: p_mfgr (type: string), p_retailprice (type: double)
+              outputColumnNames: p_mfgr, p_retailprice
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: min(p_retailprice)
+                keys: p_mfgr (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: string), _col1 (type: double)
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 
+            1 
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 14 Data size: 1730 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: double)
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Left Outer Join0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col0 (type: string)
+          outputColumnNames: _col0, _col1, _col3
+          Statistics: Num rows: 15 Data size: 1903 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: _col3 is null (type: boolean)
+            Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: double)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 7 Data size: 888 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: p_mfgr is null (type: boolean)
+              Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: p_retailprice (type: double)
+                outputColumnNames: _col1
+                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: max(_col1), min(_col1)
+                  keys: null (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: double), _col2 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: max(VALUE._col0), min(VALUE._col1)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: double), _col2 (type: double)
+            outputColumnNames: _col1, _col2
+            Statistics: Num rows: 6 Data size: 726 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((_col1 - _col2) > 600.0) (type: boolean)
+              Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                Statistics: Num rows: 2 Data size: 242 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: (_col0 = 0) (type: boolean)
+            Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: p_mfgr (type: string), p_retailprice (type: double)
+              outputColumnNames: p_mfgr, p_retailprice
+              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: max(p_retailprice), min(p_retailprice)
+                keys: p_mfgr (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: double), _col2 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: max(VALUE._col0), min(VALUE._col1)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: ((_col1 - _col2) > 600.0) (type: boolean)
+            Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
+from part b 
+group by b.p_mfgr
+having b.p_mfgr not in 
+  (select p_mfgr 
+  from part a
+  group by p_mfgr
+  having max(p_retailprice) - min(p_retailprice) > 600
+  )
+PREHOOK: type: QUERY
+PREHOOK: Input: default@part
+#### A masked pattern was here ####
+POSTHOOK: query: select b.p_mfgr, min(p_retailprice) 
+from part b 
+group by b.p_mfgr
+having b.p_mfgr not in 
+  (select p_mfgr 
+  from part a
+  group by p_mfgr
+  having max(p_retailprice) - min(p_retailprice) > 600
+  )
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@part
+#### A masked pattern was here ####
+Manufacturer#1	1173.15
+Manufacturer#2	1690.68
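
A note on the "Shuffle Join ... is a cross product" warnings in the plans above (a sketch of the semantics, not something introduced by this patch): Hive expands NOT IN (subquery) in a null-safe way. The anti-join half of the expansion for the first query looks roughly like this in plain SQL, reconstructed for readability only:

SELECT g.key, g.cnt
FROM (SELECT key, count(*) AS cnt FROM src GROUP BY key) g
LEFT OUTER JOIN (SELECT key FROM src s1 WHERE s1.key > '12') s
  ON g.key = s.key
WHERE s.key IS NULL;

NOT IN is only equivalent to that anti-join when the subquery produces no NULLs, so each plan also builds a one-row guard branch (the root stage whose reducer ends in "predicate: (_col0 = 0)"): it counts NULL-producing subquery rows, and that single row is joined with the aggregated side without keys, which is what the warning flags. If the count is nonzero, the guard emits nothing and the whole HAVING ... NOT IN filter returns no rows, matching SQL three-valued logic.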

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/tez/join0.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/join0.q.java1.7.out b/ql/src/test/results/clientpositive/tez/join0.q.java1.7.out
deleted file mode 100644
index 59d9087..0000000
--- a/ql/src/test/results/clientpositive/tez/join0.q.java1.7.out
+++ /dev/null
@@ -1,239 +0,0 @@
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
--- SORT_QUERY_RESULTS
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	9	val_9
-0	val_0	9	val_9
-0	val_0	9	val_9
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	2	val_2
-2	val_2	4	val_4
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	8	val_8
-2	val_2	9	val_9
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	2	val_2
-4	val_4	4	val_4
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	8	val_8
-4	val_4	9	val_9
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	9	val_9
-5	val_5	9	val_9
-5	val_5	9	val_9
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	2	val_2
-8	val_8	4	val_4
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	8	val_8
-8	val_8	9	val_9
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	2	val_2
-9	val_9	4	val_4
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	8	val_8
-9	val_9	9	val_9

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/tez/join0.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/join0.q.java1.8.out b/ql/src/test/results/clientpositive/tez/join0.q.java1.8.out
deleted file mode 100644
index 10d7802..0000000
--- a/ql/src/test/results/clientpositive/tez/join0.q.java1.8.out
+++ /dev/null
@@ -1,236 +0,0 @@
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-EXPLAIN
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Map 4 
-            Map Operator Tree:
-                TableScan
-                  alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: string), _col1 (type: string)
-        Reducer 2 
-            Reduce Operator Tree:
-              Merge Join Operator
-                condition map:
-                     Inner Join 0 to 1
-                keys:
-                  0 
-                  1 
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                  sort order: ++++
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN FORMATTED
-SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-#### A masked pattern was here ####
-Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
-       src2.key as k2, src2.value as v2 FROM 
-  (SELECT * FROM src WHERE src.key < 10) src1 
-    JOIN 
-  (SELECT * FROM src WHERE src.key < 10) src2
-  SORT BY k1, v1, k2, v2
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	0	val_0
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	2	val_2
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	4	val_4
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	5	val_5
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	8	val_8
-0	val_0	9	val_9
-0	val_0	9	val_9
-0	val_0	9	val_9
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	0	val_0
-2	val_2	2	val_2
-2	val_2	4	val_4
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	5	val_5
-2	val_2	8	val_8
-2	val_2	9	val_9
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	0	val_0
-4	val_4	2	val_2
-4	val_4	4	val_4
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	5	val_5
-4	val_4	8	val_8
-4	val_4	9	val_9
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	0	val_0
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	2	val_2
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	4	val_4
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	5	val_5
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	8	val_8
-5	val_5	9	val_9
-5	val_5	9	val_9
-5	val_5	9	val_9
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	0	val_0
-8	val_8	2	val_2
-8	val_8	4	val_4
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	5	val_5
-8	val_8	8	val_8
-8	val_8	9	val_9
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	0	val_0
-9	val_9	2	val_2
-9	val_9	4	val_4
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	5	val_5
-9	val_9	8	val_8
-9	val_9	9	val_9

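[Note, not part of the diff: a hedged reading of the two join0 hunks above. The deleted JDK-specific join0 golden files carry identical result rows; the visible differences are confined to plan boilerplate, for example the file-sink format and the placement of the masked-pattern lines:

    java1.8 variant (deleted):   input format: org.apache.hadoop.mapred.TextInputFormat
    unified join0.q.out (added): input format: org.apache.hadoop.mapred.SequenceFileInputFormat
]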
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/tez/join0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/join0.q.out b/ql/src/test/results/clientpositive/tez/join0.q.out
new file mode 100644
index 0000000..67d71d5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/join0.q.out
@@ -0,0 +1,237 @@
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: -- SORT_QUERY_RESULTS
+
+EXPLAIN
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+        Map 4 
+            Map Operator Tree:
+                TableScan
+                  alias: src
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Filter Operator
+                    predicate: (key < 10) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: key (type: string), value (type: string)
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string), _col1 (type: string)
+        Reducer 2 
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 
+                  1 
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                  sort order: ++++
+                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: EXPLAIN FORMATTED
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN FORMATTED
+SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+#### A masked pattern was here ####
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+PREHOOK: query: SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT src1.key as k1, src1.value as v1, 
+       src2.key as k2, src2.value as v2 FROM 
+  (SELECT * FROM src WHERE src.key < 10) src1 
+    JOIN 
+  (SELECT * FROM src WHERE src.key < 10) src2
+  SORT BY k1, v1, k2, v2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	0	val_0
+0	val_0	2	val_2
+0	val_0	2	val_2
+0	val_0	2	val_2
+0	val_0	4	val_4
+0	val_0	4	val_4
+0	val_0	4	val_4
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	5	val_5
+0	val_0	8	val_8
+0	val_0	8	val_8
+0	val_0	8	val_8
+0	val_0	9	val_9
+0	val_0	9	val_9
+0	val_0	9	val_9
+2	val_2	0	val_0
+2	val_2	0	val_0
+2	val_2	0	val_0
+2	val_2	2	val_2
+2	val_2	4	val_4
+2	val_2	5	val_5
+2	val_2	5	val_5
+2	val_2	5	val_5
+2	val_2	8	val_8
+2	val_2	9	val_9
+4	val_4	0	val_0
+4	val_4	0	val_0
+4	val_4	0	val_0
+4	val_4	2	val_2
+4	val_4	4	val_4
+4	val_4	5	val_5
+4	val_4	5	val_5
+4	val_4	5	val_5
+4	val_4	8	val_8
+4	val_4	9	val_9
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	0	val_0
+5	val_5	2	val_2
+5	val_5	2	val_2
+5	val_5	2	val_2
+5	val_5	4	val_4
+5	val_5	4	val_4
+5	val_5	4	val_4
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	5	val_5
+5	val_5	8	val_8
+5	val_5	8	val_8
+5	val_5	8	val_8
+5	val_5	9	val_9
+5	val_5	9	val_9
+5	val_5	9	val_9
+8	val_8	0	val_0
+8	val_8	0	val_0
+8	val_8	0	val_0
+8	val_8	2	val_2
+8	val_8	4	val_4
+8	val_8	5	val_5
+8	val_8	5	val_5
+8	val_8	5	val_5
+8	val_8	8	val_8
+8	val_8	9	val_9
+9	val_9	0	val_0
+9	val_9	0	val_0
+9	val_9	0	val_0
+9	val_9	2	val_2
+9	val_9	4	val_4
+9	val_9	5	val_5
+9	val_9	5	val_5
+9	val_9	5	val_5
+9	val_9	8	val_8
+9	val_9	9	val_9

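[Note, not part of the diff: the replacement file also swaps the q-test directive. Where the deleted JDK-specific outputs began with the JAVA_VERSION_SPECIFIC_OUTPUT marker, the unified golden file begins with SORT_QUERY_RESULTS, which tells Hive's q-test harness to sort the emitted rows before diffing them against the golden file, so a single output serves every JDK regardless of iteration-order differences:

    -- deleted (one golden file per JDK):
    -- JAVA_VERSION_SPECIFIC_OUTPUT

    -- added (single golden file; rows sorted before comparison):
    -- SORT_QUERY_RESULTS
]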
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
deleted file mode 100644
index 420e788..0000000
--- a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.7.out
+++ /dev/null
@@ -1,218 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-#### A masked pattern was here ####
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: i (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: avg(50), avg(50.0), avg(50)
-                      keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
-            Execution mode: vectorized
-        Reducer 2 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                  TopN Hash Memory Usage: 0.1
-                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
-        Reducer 3 
-            Execution mode: vectorized
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Limit
-                  Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-65536	50.0	50.0	50.0000
-65537	50.0	50.0	50.0000
-65538	50.0	50.0	50.0000
-65539	50.0	50.0	50.0000
-65540	50.0	50.0	50.0000
-65541	50.0	50.0	50.0000
-65542	50.0	50.0	50.0000
-65543	50.0	50.0	50.0000
-65544	50.0	50.0	50.0000
-65545	50.0	50.0	50.0000

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.8.out b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.8.out
deleted file mode 100644
index 331edd0..0000000
--- a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.java1.8.out
+++ /dev/null
@@ -1,216 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-DROP TABLE over1k
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE over1korc
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE over1korc
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1k
-POSTHOOK: query: -- data setup
-CREATE TABLE over1k(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
-STORED AS TEXTFILE
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1k
-PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-PREHOOK: type: LOAD
-#### A masked pattern was here ####
-PREHOOK: Output: default@over1k
-POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
-POSTHOOK: type: LOAD
-#### A masked pattern was here ####
-POSTHOOK: Output: default@over1k
-PREHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
-           si smallint,
-           i int,
-           b bigint,
-           f float,
-           d double,
-           bo boolean,
-           s string,
-           ts timestamp,
-           dec decimal(4,2),
-           bin binary)
-STORED AS ORC
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@over1korc
-PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1k
-PREHOOK: Output: default@over1korc
-POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1k
-POSTHOOK: Output: default@over1korc
-POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
-POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
-POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
-POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
-POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
-POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
-POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
-POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
-POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
-POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
-POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
-PREHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: over1korc
-                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: i (type: int)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: avg(50), avg(50.0), avg(50)
-                      keys: _col0 (type: int)
-                      mode: hash
-                      outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
-            Execution mode: vectorized
-        Reducer 2 
-            Reduce Operator Tree:
-              Group By Operator
-                aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
-                keys: KEY._col0 (type: int)
-                mode: mergepartial
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  sort order: +
-                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
-        Reducer 3 
-            Execution mode: vectorized
-            Reduce Operator Tree:
-              Select Operator
-                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Limit
-                  Number of rows: 10
-                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 10
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT 
-  i,
-  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
-  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
-  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
-  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@over1korc
-#### A masked pattern was here ####
-65536	50.0	50.0	50
-65537	50.0	50.0	50
-65538	50.0	50.0	50
-65539	50.0	50.0	50
-65540	50.0	50.0	50
-65541	50.0	50.0	50
-65542	50.0	50.0	50
-65543	50.0	50.0	50
-65544	50.0	50.0	50
-65545	50.0	50.0	50

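[Note, not part of the diff: the concrete JDK-sensitive difference between the two deleted vector_cast_constant golden files is visible in the final result column (avg_decimal_ok, a decimal(14,4)): the java1.7 output renders the full scale while the java1.8 output drops it:

    java1.7 (deleted):  65536  50.0  50.0  50.0000
    java1.8 (deleted):  65536  50.0  50.0  50
]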
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.out
new file mode 100644
index 0000000..46b13c8
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_cast_constant.q.out
@@ -0,0 +1,214 @@
+PREHOOK: query: DROP TABLE over1k
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE over1k
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE over1korc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE over1korc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: -- data setup
+CREATE TABLE over1k(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1k
+POSTHOOK: query: -- data setup
+CREATE TABLE over1k(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@over1k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE over1k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@over1k
+PREHOOK: query: CREATE TABLE over1korc(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over1korc
+POSTHOOK: query: CREATE TABLE over1korc(t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over1korc
+PREHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1k
+PREHOOK: Output: default@over1korc
+POSTHOOK: query: INSERT INTO TABLE over1korc SELECT * FROM over1k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1k
+POSTHOOK: Output: default@over1korc
+POSTHOOK: Lineage: over1korc.b SIMPLE [(over1k)over1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: over1korc.bin SIMPLE [(over1k)over1k.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: over1korc.bo SIMPLE [(over1k)over1k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: over1korc.d SIMPLE [(over1k)over1k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: over1korc.dec SIMPLE [(over1k)over1k.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: over1korc.f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: over1korc.i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: over1korc.s SIMPLE [(over1k)over1k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: over1korc.si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: over1korc.t SIMPLE [(over1k)over1k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: over1korc.ts SIMPLE [(over1k)over1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+PREHOOK: query: EXPLAIN SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: over1korc
+                  Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: i (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: avg(50), avg(50.0), avg(50)
+                      keys: _col0 (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3
+                      Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
+                        Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: struct<count:bigint,sum:double,input:double>), _col3 (type: struct<count:bigint,sum:decimal(12,0),input:decimal(10,0)>)
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: avg(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2)
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                  TopN Hash Memory Usage: 0.1
+                  value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4))
+        Reducer 3 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4))
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over1korc
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT 
+  i,
+  AVG(CAST(50 AS INT)) AS `avg_int_ok`,
+  AVG(CAST(50 AS DOUBLE)) AS `avg_double_ok`,
+  AVG(CAST(50 AS DECIMAL)) AS `avg_decimal_ok`
+  FROM over1korc GROUP BY i ORDER BY i LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over1korc
+#### A masked pattern was here ####
+65536	50.0	50.0	50.0000
+65537	50.0	50.0	50.0000
+65538	50.0	50.0	50.0000
+65539	50.0	50.0	50.0000
+65540	50.0	50.0	50.0000
+65541	50.0	50.0	50.0000
+65542	50.0	50.0	50.0000
+65543	50.0	50.0	50.0000
+65544	50.0	50.0	50.0000
+65545	50.0	50.0	50.0000

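[Note, not part of the diff: the unified vector_cast_constant.q.out added above follows the java1.7 rendering — the 50.0000 decimal scale, the SequenceFile sink, and the TopN line in Reducer 2 — and carries no directive comment at all, presumably because the query's ORDER BY i LIMIT 10 already makes row order deterministic, so no SORT_QUERY_RESULTS marker is needed:

    java1.7 variant: TopN Hash Memory Usage: 0.1   (retained in the unified file)
    java1.8 variant: (line absent)
]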

[52/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
new file mode 100644
index 0000000..5f0406a
--- /dev/null
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out
@@ -0,0 +1,811 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 000000_0
+-- 99 000001_0
+-- after merge
+-- 142 000000_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 000000_0
+-- 87 000001_0
+-- after merge
+-- 118 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+-- SORT_QUERY_RESULTS
+
+-- list bucketing DML: static partition. multiple skewed columns. merge.
+-- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME:
+--  5263 000000_0
+--  5263 000001_0
+-- ds=2008-04-08/hr=11/key=103/value=val_103:
+-- 99 000000_0
+-- 99 000001_0
+-- after merge
+-- 142 000000_0
+-- ds=2008-04-08/hr=11/key=484/value=val_484:
+-- 87 000000_0
+-- 87 000001_0
+-- after merge
+-- 118 000001_0
+
+-- create a skewed table
+create table list_bucketing_static_part (key String, value String) 
+    partitioned by (ds String, hr String) 
+    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
+    stored as DIRECTORIES
+    STORED AS RCFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@list_bucketing_static_part
+PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=11/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_static_part
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_static_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_static_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_static_part
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_static_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	6                   
+	numRows             	1000                
+	rawDataSize         	9624                
+	totalSize           	10898               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files.
+explain extended
+insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: srcpart
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+            GatherStats: false
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                GlobalTableId: 1
+#### A masked pattern was here ####
+                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-04-08/hr=11/
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    properties:
+                      bucket_count -1
+                      columns key,value
+                      columns.comments 
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.list_bucketing_static_part
+                      partition_columns.types string:string
+                      serialization.ddl struct list_bucketing_static_part { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.list_bucketing_static_part
+                TotalFiles: 1
+                GatherStats: true
+                MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=11
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+#### A masked pattern was here ####
+          Partition
+            base file name: hr=12
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 12
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
+              bucket_count -1
+              columns key,value
+              columns.comments 'default','default'
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.srcpart
+              numFiles 1
+              numRows 500
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 5312
+              serialization.ddl struct srcpart { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 5812
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 'default','default'
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.srcpart
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct srcpart { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.srcpart
+            name: default.srcpart
+      Truncated Path -> Alias:
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          partition:
+            ds 2008-04-08
+            hr 11
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              partition_columns.types string:string
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-5
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            properties:
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              partition_columns.types string:string
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: query: insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08',  hr = '11')
+select key, value from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+PREHOOK: type: SHOWPARTITIONS
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: -- check DML result
+show partitions list_bucketing_static_part
+POSTHOOK: type: SHOWPARTITIONS
+POSTHOOK: Input: default@list_bucketing_static_part
+ds=2008-04-08/hr=11
+PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+# col_name            	data_type           	comment             
+	 	 
+key                 	string              	                    
+value               	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	list_bucketing_static_part	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	1000                
+	rawDataSize         	9624                
+	totalSize           	10786               
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[key, value]        	 
+Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+1000
+PREHOOK: query: select count(*) from list_bucketing_static_part
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from list_bucketing_static_part
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+1000
+PREHOOK: query: explain extended
+select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
+          Partition
+            input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+            output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+            partition values:
+              ds 2008-04-08
+              hr 11
+            properties:
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
+              bucket_count -1
+              columns key,value
+              columns.comments 
+              columns.types string:string
+#### A masked pattern was here ####
+              name default.list_bucketing_static_part
+              numFiles 4
+              numRows 1000
+              partition_columns ds/hr
+              partition_columns.types string:string
+              rawDataSize 9624
+              serialization.ddl struct list_bucketing_static_part { string key, string value}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              totalSize 10786
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+          
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              properties:
+                bucket_count -1
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.list_bucketing_static_part
+                partition_columns ds/hr
+                partition_columns.types string:string
+                serialization.ddl struct list_bucketing_static_part { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+              name: default.list_bucketing_static_part
+            name: default.list_bucketing_static_part
+      Processor Tree:
+        TableScan
+          alias: list_bucketing_static_part
+          Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Filter Operator
+            isSamplingPred: false
+            predicate: ((key = '484') and (value = 'val_484')) (type: boolean)
+            Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: '484' (type: string), 'val_484' (type: string), '2008-04-08' (type: string), '11' (type: string)
+              outputColumnNames: _col0, _col1, _col2, _col3
+              Statistics: Num rows: 250 Data size: 2406 Basic stats: COMPLETE Column stats: NONE
+              ListSink
+
+PREHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+POSTHOOK: query: select * from list_bucketing_static_part where ds = '2008-04-08' and  hr = '11' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11
+#### A masked pattern was here ####
+484	val_484	2008-04-08	11
+484	val_484	2008-04-08	11
+PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '484' and value = 'val_484'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+#### A masked pattern was here ####
+484	val_484	2008-04-08	11
+484	val_484	2008-04-08	12
+PREHOOK: query: -- clean up
+drop table list_bucketing_static_part
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@list_bucketing_static_part
+PREHOOK: Output: default@list_bucketing_static_part
+POSTHOOK: query: -- clean up
+drop table list_bucketing_static_part
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@list_bucketing_static_part
+POSTHOOK: Output: default@list_bucketing_static_part

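Note on the plans above: this golden file exercises list-bucketing DML into a static partition. The table is skewed on (key, value) and stored as directories, the insert targets a fixed (ds, hr) partition, and the conditional merge stages (Stage-7 choosing among Stage-4/Stage-3/Stage-5) compact the small RCFile outputs, which is why numFiles drops from 6 to 4 after the merge run. A minimal HiveQL sketch of that pattern, reconstructed from the DDL and DML echoed in the output above; the session settings are the ones the list_bucket_dml_* tests conventionally set and are an assumption here, since the .q file itself is not part of this diff:

    -- Assumed session settings (conventional for the list_bucket_dml_* tests,
    -- not shown in this diff):
    set hive.mapred.supports.subdirectories=true;  -- list bucketing writes per-skew subdirectories
    set hive.merge.mapfiles=true;                  -- enable the conditional merge stages
    set hive.merge.mapredfiles=true;

    -- Skewed table matching the plan output above (RCFile, skewed on key/value):
    create table list_bucketing_static_part (key String, value String)
    partitioned by (ds String, hr String)
    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
    stored as DIRECTORIES
    stored as RCFILE;

    -- Static-partition DML exercised by the plans above:
    insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11')
    select key, value from srcpart where ds = '2008-04-08';

With list-bucketing pruning enabled (conventionally hive.optimize.listbucketing=true), the final EXPLAIN above turns into a Fetch-only plan whose "Skewed Value to Truncated Path" entry lets the key = '484' / value = 'val_484' predicate read just that one subdirectory.
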
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
deleted file mode 100644
index a0947b2..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
+++ /dev/null
@@ -1,506 +0,0 @@
-PREHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) 
-stored as DIRECTORIES
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) 
-stored as DIRECTORIES
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_dynamic_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.list_bucketing_dynamic_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.list_bucketing_dynamic_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- check DML result
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	3                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 12]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
-	numFiles            	3                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=484/value=val_484}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds='2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds='2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select key, value from srcpart where ds='2008-04-08' and key = "103" and value ="val_103"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value from srcpart where ds='2008-04-08' and key = "103" and value ="val_103"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-103	val_103
-103	val_103
-103	val_103
-103	val_103
-PREHOOK: query: explain extended
-select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Partition Description:
-          Partition
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 3
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-          Partition
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true"}
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 3
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      Processor Tree:
-        TableScan
-          alias: list_bucketing_dynamic_part
-          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-          GatherStats: false
-          Filter Operator
-            isSamplingPred: false
-            predicate: ((key = '103') and (value = 'val_103')) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: '103' (type: string), 'val_103' (type: string), '2008-04-08' (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              ListSink
-
-PREHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-103	val_103	2008-04-08	11
-103	val_103	2008-04-08	11
-103	val_103	2008-04-08	12
-103	val_103	2008-04-08	12
-PREHOOK: query: -- clean up resources
-drop table list_bucketing_dynamic_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- clean up resources
-drop table list_bucketing_dynamic_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part

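The deleted file above (.java1.7.out) and the one below (.java1.8.out) are the JDK-version-specific golden outputs for list_bucket_dml_5, which covers the dynamic-partition flavor of the same pattern: ds is fixed while hr flows through as a dynamic partition column, producing both hr=11 and hr=12 outputs from one insert. A minimal sketch, taken verbatim from the DDL/DML echoed in the deleted output itself:

    -- Skewed table (as echoed in the deleted golden file):
    create table list_bucketing_dynamic_part (key String, value String)
    partitioned by (ds String, hr String)
    skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103'))
    stored as DIRECTORIES;

    -- Dynamic-partition DML: ds is static and hr is dynamic, so strict
    -- dynamic-partition mode still permits it.
    insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr)
    select key, value, hr from srcpart where ds='2008-04-08';
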
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out
deleted file mode 100644
index 1c33382..0000000
--- a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.8.out
+++ /dev/null
@@ -1,617 +0,0 @@
-PREHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) 
-stored as DIRECTORIES
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages
-
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
--- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
--- create a skewed table
-create table list_bucketing_dynamic_part (key String, value String) 
-partitioned by (ds String, hr String) 
-skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) 
-stored as DIRECTORIES
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@list_bucketing_dynamic_part
-PREHOOK: query: -- list bucketing DML
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
-PREHOOK: type: QUERY
-POSTHOOK: query: -- list bucketing DML
-explain extended
-insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            srcpart
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_TAB
-            TOK_TABNAME
-               list_bucketing_dynamic_part
-            TOK_PARTSPEC
-               TOK_PARTVAL
-                  ds
-                  '2008-04-08'
-               TOK_PARTVAL
-                  hr
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               hr
-      TOK_WHERE
-         =
-            TOK_TABLE_OR_COL
-               ds
-            '2008-04-08'
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string), value (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 1
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Static Partition Specification: ds=2008-04-08/
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      bucket_count -1
-                      columns key,value
-                      columns.comments 
-                      columns.types string:string
-#### A masked pattern was here ####
-                      name default.list_bucketing_dynamic_part
-                      partition_columns ds/hr
-                      partition_columns.types string:string
-                      serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.list_bucketing_dynamic_part
-                TotalFiles: 1
-                GatherStats: true
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=11
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-#### A masked pattern was here ####
-          Partition
-            base file name: hr=12
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 'default','default'
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.srcpart
-              numFiles 1
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct srcpart { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 'default','default'
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.srcpart
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct srcpart { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.srcpart
-            name: default.srcpart
-      Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          partition:
-            ds 2008-04-08
-            hr 
-          replace: true
-#### A masked pattern was here ####
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.list_bucketing_dynamic_part
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-#### A masked pattern was here ####
-
-PREHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08
-POSTHOOK: query: insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: -- check DML result
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- check DML result
-desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 11]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	3                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12')
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='12')
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-# col_name            	data_type           	comment             
-	 	 
-key                 	string              	                    
-value               	string              	                    
-	 	 
-# Partition Information	 	 
-# col_name            	data_type           	comment             
-	 	 
-ds                  	string              	                    
-hr                  	string              	                    
-	 	 
-# Detailed Partition Information	 	 
-Partition Value:    	[2008-04-08, 12]    	 
-Database:           	default             	 
-Table:              	list_bucketing_dynamic_part	 
-#### A masked pattern was here ####
-Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	true                
-	numFiles            	3                   
-	numRows             	500                 
-	rawDataSize         	5312                
-	totalSize           	5812                
-#### A masked pattern was here ####
-	 	 
-# Storage Information	 	 
-SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
-InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
-OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
-Compressed:         	No                  	 
-Num Buckets:        	-1                  	 
-Bucket Columns:     	[]                  	 
-Sort Columns:       	[]                  	 
-Stored As SubDirectories:	Yes                 	 
-Skewed Columns:     	[key, value]        	 
-Skewed Values:      	[[484, val_484], [51, val_14], [103, val_103]]	 
-#### A masked pattern was here ####
-Skewed Value to Truncated Path:	{[484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=484/value=val_484, [103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=103/value=val_103}	 
-Storage Desc Params:	 	 
-	serialization.format	1                   
-PREHOOK: query: select count(1) from srcpart where ds='2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from srcpart where ds='2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select count(1) from list_bucketing_dynamic_part where ds='2008-04-08'
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-1000
-PREHOOK: query: select key, value from srcpart where ds='2008-04-08' and key = "103" and value ="val_103"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@srcpart
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value from srcpart where ds='2008-04-08' and key = "103" and value ="val_103"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@srcpart
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-103	val_103
-103	val_103
-103	val_103
-103	val_103
-PREHOOK: query: explain extended
-select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
-PREHOOK: type: QUERY
-POSTHOOK: query: explain extended
-select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
-POSTHOOK: type: QUERY
-ABSTRACT SYNTAX TREE:
-  
-TOK_QUERY
-   TOK_FROM
-      TOK_TABREF
-         TOK_TABNAME
-            list_bucketing_dynamic_part
-   TOK_INSERT
-      TOK_DESTINATION
-         TOK_DIR
-            TOK_TMP_FILE
-      TOK_SELECT
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               key
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               value
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               ds
-         TOK_SELEXPR
-            TOK_TABLE_OR_COL
-               hr
-      TOK_WHERE
-         and
-            and
-               =
-                  TOK_TABLE_OR_COL
-                     ds
-                  '2008-04-08'
-               =
-                  TOK_TABLE_OR_COL
-                     key
-                  "103"
-            =
-               TOK_TABLE_OR_COL
-                  value
-               "val_103"
-
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: list_bucketing_dynamic_part
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Filter Operator
-              isSamplingPred: false
-              predicate: ((key = '103') and (value = 'val_103')) (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: '103' (type: string), 'val_103' (type: string), '2008-04-08' (type: string), hr (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 0
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        columns _col0,_col1,_col2,_col3
-                        columns.types string:string:string:string
-                        escape.delim \
-                        hive.serialization.extend.nesting.levels true
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                  TotalFiles: 1
-                  GatherStats: false
-                  MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_103
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 11
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 3
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-#### A masked pattern was here ####
-          Partition
-            base file name: value=val_103
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            partition values:
-              ds 2008-04-08
-              hr 12
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments 
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.list_bucketing_dynamic_part
-              numFiles 3
-              numRows 500
-              partition_columns ds/hr
-              partition_columns.types string:string
-              rawDataSize 5312
-              serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                bucket_count -1
-                columns key,value
-                columns.comments 
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.list_bucketing_dynamic_part
-                partition_columns ds/hr
-                partition_columns.types string:string
-                serialization.ddl struct list_bucketing_dynamic_part { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.list_bucketing_dynamic_part
-            name: default.list_bucketing_dynamic_part
-      Truncated Path -> Alias:
-        /list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=103/value=val_103 [list_bucketing_dynamic_part]
-        /list_bucketing_dynamic_part/ds=2008-04-08/hr=12/key=103/value=val_103 [list_bucketing_dynamic_part]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
-PREHOOK: type: QUERY
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-PREHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-POSTHOOK: query: select key, value, ds, hr from list_bucketing_dynamic_part where ds='2008-04-08' and key = "103" and value ="val_103"
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12
-#### A masked pattern was here ####
-103	val_103	2008-04-08	11
-103	val_103	2008-04-08	11
-103	val_103	2008-04-08	12
-103	val_103	2008-04-08	12
-PREHOOK: query: -- clean up resources
-drop table list_bucketing_dynamic_part
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@list_bucketing_dynamic_part
-PREHOOK: Output: default@list_bucketing_dynamic_part
-POSTHOOK: query: -- clean up resources
-drop table list_bucketing_dynamic_part
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@list_bucketing_dynamic_part
-POSTHOOK: Output: default@list_bucketing_dynamic_part


[65/66] [abbrv] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
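
[Editorial note] Every hunk below makes the same mechanical change: the key order inside the COLUMN_STATS_ACCURATE JSON value in the golden files flips from {"COLUMN_STATS":...,"BASIC_STATS":"true"} to {"BASIC_STATS":"true","COLUMN_STATS":...}. That value is rendered from a Java map, and HashMap iteration order is unspecified and differs between JDK 7 and JDK 8, so the old expected output stopped matching under JDK 8; the regenerated files use the alphabetical order that a deterministic (sorted) map produces. The following is a minimal sketch of the failure mode and the conventional fix; it is illustrative only, not Hive's actual serialization code, and the class name StatsJsonOrderDemo and the toJson helper are invented for the example.

    // Minimal sketch (assumption: not Hive's real code): why JSON built from a
    // HashMap can serialize with different key order on different JDKs, and how
    // a sorted map makes golden-file output deterministic.
    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;

    public class StatsJsonOrderDemo {
        // Render a map as a JSON object in the map's iteration order; the
        // values here are already JSON literals, so they are appended raw.
        static String toJson(Map<String, String> m) {
            StringBuilder sb = new StringBuilder("{");
            boolean first = true;
            for (Map.Entry<String, String> e : m.entrySet()) {
                if (!first) sb.append(",");
                first = false;
                sb.append("\"").append(e.getKey()).append("\":").append(e.getValue());
            }
            return sb.append("}").toString();
        }

        public static void main(String[] args) {
            // HashMap iteration order is unspecified and changed between
            // JDK 7 and JDK 8, so this line's key order depends on the JDK.
            Map<String, String> unstable = new HashMap<>();
            unstable.put("COLUMN_STATS", "{\"key\":\"true\",\"value\":\"true\"}");
            unstable.put("BASIC_STATS", "\"true\"");
            System.out.println(toJson(unstable));

            // A TreeMap iterates in sorted key order on every JDK, matching
            // the updated golden files: BASIC_STATS before COLUMN_STATS.
            Map<String, String> stable = new TreeMap<>(unstable);
            System.out.println(toJson(stable));
        }
    }

With the sorted map, the second line always prints {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}, which is exactly the right-hand side of each hunk below.
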
http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out
index 8653c2f..bebb007 100644
--- a/ql/src/test/results/clientpositive/join33.q.out
+++ b/ql/src/test/results/clientpositive/join33.q.out
@@ -159,7 +159,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -179,7 +179,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -203,7 +203,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -223,7 +223,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -250,7 +250,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join34.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join34.q.out b/ql/src/test/results/clientpositive/join34.q.out
index bb23644..365992b 100644
--- a/ql/src/test/results/clientpositive/join34.q.out
+++ b/ql/src/test/results/clientpositive/join34.q.out
@@ -197,7 +197,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -217,7 +217,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -241,7 +241,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -261,7 +261,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join35.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join35.q.out b/ql/src/test/results/clientpositive/join35.q.out
index b1732ec..2c2681f 100644
--- a/ql/src/test/results/clientpositive/join35.q.out
+++ b/ql/src/test/results/clientpositive/join35.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -102,7 +102,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -324,7 +324,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -344,7 +344,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -435,7 +435,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -455,7 +455,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join9.q.out b/ql/src/test/results/clientpositive/join9.q.out
index 180d46c..efddb5d 100644
--- a/ql/src/test/results/clientpositive/join9.q.out
+++ b/ql/src/test/results/clientpositive/join9.q.out
@@ -77,7 +77,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -97,7 +97,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -124,7 +124,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_map_ppr.q.out b/ql/src/test/results/clientpositive/join_map_ppr.q.out
index 928d4fb..e44ceac 100644
--- a/ql/src/test/results/clientpositive/join_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/join_map_ppr.q.out
@@ -148,7 +148,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -716,7 +716,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
index 2b7ebb2..1d43bc0 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
@@ -88,7 +88,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -134,7 +134,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
index 5016855..ebbbb26 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out
@@ -85,7 +85,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -105,7 +105,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
index 548815a..385e113 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
@@ -82,7 +82,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -128,7 +128,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
index b8757de..224ecc2 100644
--- a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
@@ -114,7 +114,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -160,7 +160,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -422,7 +422,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -468,7 +468,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/load_dyn_part8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/load_dyn_part8.q.out
index 757e7dd..a8247f5 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part8.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part8.q.out
@@ -148,7 +148,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -194,7 +194,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -240,7 +240,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -286,7 +286,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/louter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/louter_join_ppr.q.out
index fd127ec..c1319f8 100644
--- a/ql/src/test/results/clientpositive/louter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/louter_join_ppr.q.out
@@ -79,7 +79,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -99,7 +99,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -126,7 +126,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -172,7 +172,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -371,7 +371,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -391,7 +391,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -418,7 +418,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -464,7 +464,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -510,7 +510,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -556,7 +556,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -770,7 +770,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -790,7 +790,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -817,7 +817,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -863,7 +863,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1058,7 +1058,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1078,7 +1078,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1105,7 +1105,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1151,7 +1151,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
index 85bd14b..17a1cde 100644
--- a/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
@@ -129,7 +129,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -149,7 +149,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -173,7 +173,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -193,7 +193,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -220,7 +220,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -266,7 +266,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -312,7 +312,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -358,7 +358,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out b/ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out
index 7d8655f..a3cc93e 100644
--- a/ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out
+++ b/ql/src/test/results/clientpositive/offset_limit_global_optimizer.q.out
@@ -58,7 +58,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -190,7 +190,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -236,7 +236,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -282,7 +282,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -328,7 +328,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -463,7 +463,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -509,7 +509,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -555,7 +555,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -601,7 +601,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -746,7 +746,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -792,7 +792,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -838,7 +838,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -884,7 +884,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1609,7 +1609,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1741,7 +1741,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1787,7 +1787,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1833,7 +1833,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1879,7 +1879,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -2014,7 +2014,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -2060,7 +2060,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -2106,7 +2106,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -2152,7 +2152,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -2297,7 +2297,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -2343,7 +2343,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -2389,7 +2389,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -2435,7 +2435,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/optimize_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/optimize_nullscan.q.out b/ql/src/test/results/clientpositive/optimize_nullscan.q.out
index 4a693d6..b045cc5 100644
--- a/ql/src/test/results/clientpositive/optimize_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/optimize_nullscan.q.out
@@ -64,7 +64,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -84,7 +84,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -252,7 +252,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -297,7 +297,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -342,7 +342,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -387,7 +387,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -518,7 +518,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -538,7 +538,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -660,7 +660,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -680,7 +680,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -861,7 +861,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -907,7 +907,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -953,7 +953,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -999,7 +999,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1140,7 +1140,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1185,7 +1185,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1230,7 +1230,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1275,7 +1275,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1404,7 +1404,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1424,7 +1424,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1558,7 +1558,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1578,7 +1578,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1720,7 +1720,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1740,7 +1740,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1834,7 +1834,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1854,7 +1854,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1967,7 +1967,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.hive.ql.io.OneNullRowInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1987,7 +1987,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
index b7a09d2..06c178c 100644
--- a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
+++ b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out
@@ -373,7 +373,7 @@ Database:           	default
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
 #### A masked pattern was here ####
 	numFiles            	2                   
 	numRows             	30                  
@@ -424,7 +424,7 @@ Database:           	default
 Table:              	partcoltypenum      	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
 #### A masked pattern was here ####
 	numFiles            	2                   
 	numRows             	30                  

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/pcr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out
index 9daddfb..9fb1481 100644
--- a/ql/src/test/results/clientpositive/pcr.q.out
+++ b/ql/src/test/results/clientpositive/pcr.q.out
@@ -4635,7 +4635,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -4764,7 +4764,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -4810,7 +4810,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -4943,7 +4943,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -4989,7 +4989,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/pcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcs.q.out b/ql/src/test/results/clientpositive/pcs.q.out
index 8b99401..0045c1c 100644
--- a/ql/src/test/results/clientpositive/pcs.q.out
+++ b/ql/src/test/results/clientpositive/pcs.q.out
@@ -120,7 +120,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -165,7 +165,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-09
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -310,7 +310,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -355,7 +355,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-09
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -472,7 +472,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -517,7 +517,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-09
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -626,7 +626,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -671,7 +671,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-09
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -827,7 +827,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -872,7 +872,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-09
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1164,7 +1164,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1303,7 +1303,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1348,7 +1348,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-09
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1413,7 +1413,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1456,7 +1456,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-09
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1499,7 +1499,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-10
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1586,7 +1586,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-08
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1629,7 +1629,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-09
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 
@@ -1672,7 +1672,7 @@ STAGE PLANS:
             partition values:
               ds 2000-04-10
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/ppd_join_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join_filter.q.out b/ql/src/test/results/clientpositive/ppd_join_filter.q.out
index d8e5009..b63161b 100644
--- a/ql/src/test/results/clientpositive/ppd_join_filter.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join_filter.q.out
@@ -65,7 +65,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -85,7 +85,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -202,7 +202,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -222,7 +222,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -387,7 +387,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -407,7 +407,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -524,7 +524,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -544,7 +544,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -709,7 +709,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -729,7 +729,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -846,7 +846,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -866,7 +866,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1031,7 +1031,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1051,7 +1051,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1168,7 +1168,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1188,7 +1188,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/ppd_vc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_vc.q.out b/ql/src/test/results/clientpositive/ppd_vc.q.out
index a82a709..21181ac 100644
--- a/ql/src/test/results/clientpositive/ppd_vc.q.out
+++ b/ql/src/test/results/clientpositive/ppd_vc.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -108,7 +108,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -154,7 +154,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -200,7 +200,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -370,7 +370,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -390,7 +390,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -417,7 +417,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -463,7 +463,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -509,7 +509,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -555,7 +555,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'


[66/66] [abbrv] hive git commit: HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
HIVE-13409: Fix JDK8 test failures related to COLUMN_STATS_ACCURATE (Mohit Sabharwal, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bd651756
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bd651756
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bd651756

Branch: refs/heads/java8
Commit: bd651756a6be2061e609404d0805b3d01b9d7b7f
Parents: 9687dcc
Author: Mohit Sabharwal <mo...@cloudera.com>
Authored: Tue May 24 09:30:32 2016 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Thu May 26 10:39:19 2016 -0500

----------------------------------------------------------------------
 .../test/results/clientpositive/bucket1.q.out   |  4 +-
 .../test/results/clientpositive/bucket2.q.out   |  4 +-
 .../test/results/clientpositive/bucket3.q.out   |  4 +-
 .../test/results/clientpositive/bucket4.q.out   |  4 +-
 .../test/results/clientpositive/bucket5.q.out   |  4 +-
 .../results/clientpositive/bucket_many.q.out    |  4 +-
 .../columnStatsUpdateForStatsOptimizer_1.q.out  |  8 +--
 .../columnStatsUpdateForStatsOptimizer_2.q.out  |  2 +-
 .../constantPropagateForSubQuery.q.out          |  8 +--
 ql/src/test/results/clientpositive/ctas.q.out   |  4 +-
 .../results/clientpositive/describe_table.q.out |  4 +-
 .../disable_merge_for_bucketing.q.out           |  4 +-
 .../extrapolate_part_stats_full.q.out           | 24 ++++-----
 .../extrapolate_part_stats_partial.q.out        | 32 +++++------
 .../extrapolate_part_stats_partial_ndv.q.out    | 16 +++---
 .../clientpositive/fouter_join_ppr.q.out        | 40 +++++++-------
 .../clientpositive/groupby_map_ppr.q.out        |  4 +-
 .../groupby_map_ppr_multi_distinct.q.out        |  4 +-
 .../results/clientpositive/groupby_ppr.q.out    |  4 +-
 .../groupby_ppr_multi_distinct.q.out            |  4 +-
 .../test/results/clientpositive/input23.q.out   |  2 +-
 .../test/results/clientpositive/input42.q.out   | 12 ++---
 .../results/clientpositive/input_part1.q.out    |  2 +-
 .../results/clientpositive/input_part2.q.out    |  4 +-
 .../results/clientpositive/input_part7.q.out    |  4 +-
 .../results/clientpositive/input_part9.q.out    |  4 +-
 ql/src/test/results/clientpositive/join17.q.out |  4 +-
 ql/src/test/results/clientpositive/join26.q.out |  2 +-
 ql/src/test/results/clientpositive/join32.q.out | 10 ++--
 .../clientpositive/join32_lessSize.q.out        | 46 ++++++++--------
 ql/src/test/results/clientpositive/join33.q.out | 10 ++--
 ql/src/test/results/clientpositive/join34.q.out |  8 +--
 ql/src/test/results/clientpositive/join35.q.out | 12 ++---
 ql/src/test/results/clientpositive/join9.q.out  |  6 +--
 .../results/clientpositive/join_map_ppr.q.out   |  4 +-
 .../clientpositive/list_bucket_dml_1.q.out      |  4 +-
 .../clientpositive/list_bucket_dml_14.q.out     |  4 +-
 .../clientpositive/list_bucket_dml_3.q.out      |  4 +-
 .../clientpositive/list_bucket_dml_7.q.out      |  8 +--
 .../results/clientpositive/load_dyn_part8.q.out |  8 +--
 .../clientpositive/louter_join_ppr.q.out        | 36 ++++++-------
 .../clientpositive/mapjoin_mapjoin.q.out        | 16 +++---
 .../offset_limit_global_optimizer.q.out         | 52 +++++++++---------
 .../clientpositive/optimize_nullscan.q.out      | 56 ++++++++++----------
 .../partition_coltype_literals.q.out            |  4 +-
 ql/src/test/results/clientpositive/pcr.q.out    | 10 ++--
 ql/src/test/results/clientpositive/pcs.q.out    | 38 ++++++-------
 .../clientpositive/ppd_join_filter.q.out        | 32 +++++------
 ql/src/test/results/clientpositive/ppd_vc.q.out | 20 +++----
 .../clientpositive/ppr_allchildsarenull.q.out   | 12 ++---
 .../clientpositive/rand_partitionpruner1.q.out  |  4 +-
 .../clientpositive/rand_partitionpruner2.q.out  |  4 +-
 .../clientpositive/rand_partitionpruner3.q.out  |  4 +-
 .../clientpositive/reduce_deduplicate.q.out     |  4 +-
 .../results/clientpositive/regexp_extract.q.out |  8 +--
 .../clientpositive/router_join_ppr.q.out        | 36 ++++++-------
 .../test/results/clientpositive/sample1.q.out   |  2 +-
 .../test/results/clientpositive/sample2.q.out   |  4 +-
 .../test/results/clientpositive/sample4.q.out   |  4 +-
 .../test/results/clientpositive/sample5.q.out   |  4 +-
 .../test/results/clientpositive/sample6.q.out   | 32 +++++------
 .../test/results/clientpositive/sample7.q.out   |  4 +-
 .../test/results/clientpositive/sample8.q.out   |  8 +--
 .../test/results/clientpositive/sample9.q.out   |  4 +-
 .../clientpositive/schema_evol_stats.q.out      |  8 +--
 .../clientpositive/serde_user_properties.q.out  | 12 ++---
 .../results/clientpositive/spark/bucket2.q.out  |  4 +-
 .../results/clientpositive/spark/bucket3.q.out  |  4 +-
 .../results/clientpositive/spark/bucket4.q.out  |  4 +-
 .../results/clientpositive/spark/ctas.q.out     |  4 +-
 .../spark/disable_merge_for_bucketing.q.out     |  4 +-
 .../clientpositive/spark/groupby_map_ppr.q.out  |  4 +-
 .../spark/groupby_map_ppr_multi_distinct.q.out  |  4 +-
 .../clientpositive/spark/groupby_ppr.q.out      |  4 +-
 .../spark/groupby_ppr_multi_distinct.q.out      |  4 +-
 .../clientpositive/spark/input_part2.q.out      |  4 +-
 .../results/clientpositive/spark/join17.q.out   |  8 +--
 .../results/clientpositive/spark/join26.q.out   | 10 ++--
 .../results/clientpositive/spark/join32.q.out   | 10 ++--
 .../clientpositive/spark/join32_lessSize.q.out  | 46 ++++++++--------
 .../results/clientpositive/spark/join33.q.out   | 10 ++--
 .../results/clientpositive/spark/join34.q.out   | 12 ++---
 .../results/clientpositive/spark/join35.q.out   | 12 ++---
 .../results/clientpositive/spark/join9.q.out    |  6 +--
 .../clientpositive/spark/join_map_ppr.q.out     | 12 ++---
 .../clientpositive/spark/load_dyn_part8.q.out   |  8 +--
 .../clientpositive/spark/louter_join_ppr.q.out  | 36 ++++++-------
 .../clientpositive/spark/mapjoin_mapjoin.q.out  | 16 +++---
 .../spark/optimize_nullscan.q.out               | 56 ++++++++++----------
 .../test/results/clientpositive/spark/pcr.q.out | 10 ++--
 .../clientpositive/spark/ppd_join_filter.q.out  | 32 +++++------
 .../clientpositive/spark/router_join_ppr.q.out  | 36 ++++++-------
 .../results/clientpositive/spark/sample1.q.out  |  2 +-
 .../results/clientpositive/spark/sample2.q.out  |  4 +-
 .../results/clientpositive/spark/sample4.q.out  |  4 +-
 .../results/clientpositive/spark/sample5.q.out  |  4 +-
 .../results/clientpositive/spark/sample6.q.out  | 32 +++++------
 .../results/clientpositive/spark/sample7.q.out  |  4 +-
 .../results/clientpositive/spark/sample8.q.out  | 10 ++--
 .../results/clientpositive/spark/stats0.q.out   |  8 +--
 .../clientpositive/spark/stats_only_null.q.out  |  4 +-
 .../spark/subquery_multiinsert.q.out            |  4 +-
 .../clientpositive/spark/transform_ppr1.q.out   |  8 +--
 .../clientpositive/spark/transform_ppr2.q.out   |  4 +-
 .../spark/vector_cast_constant.q.out            |  2 +-
 ql/src/test/results/clientpositive/stats0.q.out |  8 +--
 .../clientpositive/stats_invalidation.q.out     |  2 +-
 .../clientpositive/stats_only_null.q.out        |  4 +-
 .../results/clientpositive/tez/bucket3.q.out    |  4 +-
 .../results/clientpositive/tez/bucket4.q.out    |  4 +-
 .../test/results/clientpositive/tez/ctas.q.out  |  4 +-
 .../tez/disable_merge_for_bucketing.q.out       |  4 +-
 .../clientpositive/tez/mapjoin_mapjoin.q.out    | 16 +++---
 .../clientpositive/tez/optimize_nullscan.q.out  | 56 ++++++++++----------
 .../results/clientpositive/tez/sample1.q.out    |  2 +-
 .../clientpositive/tez/schema_evol_stats.q.out  |  8 +--
 .../clientpositive/tez/stats_only_null.q.out    |  4 +-
 .../clientpositive/tez/transform_ppr1.q.out     |  8 +--
 .../clientpositive/tez/transform_ppr2.q.out     |  4 +-
 .../results/clientpositive/transform_ppr1.q.out |  8 +--
 .../results/clientpositive/transform_ppr2.q.out |  4 +-
 .../results/clientpositive/udf_explode.q.out    |  8 +--
 .../results/clientpositive/udtf_explode.q.out   |  8 +--
 .../test/results/clientpositive/union_ppr.q.out |  4 +-
 124 files changed, 684 insertions(+), 684 deletions(-)
----------------------------------------------------------------------
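
Every hunk in this patch applies the same mechanical change: the JSON value of the COLUMN_STATS_ACCURATE table/partition property is now emitted with its top-level keys in a fixed alphabetical order, so "BASIC_STATS" precedes "COLUMN_STATS" in the regenerated .q.out files (the key order inside the nested COLUMN_STATS object is unchanged). A minimal sketch of how such deterministic top-level ordering can be produced follows — note this is illustrative only: the Jackson usage and the StatsJsonDemo class name are assumptions for the example, not the actual serializer changed in Hive.

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.TreeMap;

    import com.fasterxml.jackson.databind.ObjectMapper;

    public class StatsJsonDemo {
        public static void main(String[] args) throws Exception {
            // Nested map keeps insertion order, matching the unchanged
            // inner key order visible in the hunks below.
            Map<String, String> colStats = new LinkedHashMap<>();
            colStats.put("key", "true");
            colStats.put("value", "true");

            // TreeMap iterates its keys in sorted order, so BASIC_STATS
            // comes out before COLUMN_STATS on every serialization.
            Map<String, Object> accurate = new TreeMap<>();
            accurate.put("COLUMN_STATS", colStats);
            accurate.put("BASIC_STATS", "true");

            // Prints: {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
            System.out.println(new ObjectMapper().writeValueAsString(accurate));
        }
    }

Stable key ordering matters here because these .q.out files are golden outputs: a serializer that emits map entries in hash order makes the tests nondeterministic, whereas sorted top-level keys let the expected files be regenerated once, as in the hunks that follow.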


http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/bucket1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket1.q.out b/ql/src/test/results/clientpositive/bucket1.q.out
index 78fb530..92ecd67 100644
--- a/ql/src/test/results/clientpositive/bucket1.q.out
+++ b/ql/src/test/results/clientpositive/bucket1.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -72,7 +72,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/bucket2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket2.q.out b/ql/src/test/results/clientpositive/bucket2.q.out
index 297984e..b849ed3 100644
--- a/ql/src/test/results/clientpositive/bucket2.q.out
+++ b/ql/src/test/results/clientpositive/bucket2.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -72,7 +72,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/bucket3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket3.q.out b/ql/src/test/results/clientpositive/bucket3.q.out
index 29afaea..fa8b0f9 100644
--- a/ql/src/test/results/clientpositive/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/bucket3.q.out
@@ -52,7 +52,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -72,7 +72,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/bucket4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket4.q.out b/ql/src/test/results/clientpositive/bucket4.q.out
index 803a2bb..ec28d09 100644
--- a/ql/src/test/results/clientpositive/bucket4.q.out
+++ b/ql/src/test/results/clientpositive/bucket4.q.out
@@ -49,7 +49,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -69,7 +69,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/bucket5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket5.q.out b/ql/src/test/results/clientpositive/bucket5.q.out
index 2e37eef..bd1013a 100644
--- a/ql/src/test/results/clientpositive/bucket5.q.out
+++ b/ql/src/test/results/clientpositive/bucket5.q.out
@@ -94,7 +94,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -114,7 +114,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/bucket_many.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_many.q.out b/ql/src/test/results/clientpositive/bucket_many.q.out
index 9bd90b1..87954e0 100644
--- a/ql/src/test/results/clientpositive/bucket_many.q.out
+++ b/ql/src/test/results/clientpositive/bucket_many.q.out
@@ -48,7 +48,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -68,7 +68,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_1.q.out b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_1.q.out
index d812193..00f3776 100644
--- a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_1.q.out
+++ b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_1.q.out
@@ -204,7 +204,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"year\":\"true\",\"month\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"year\":\"true\",\"month\":\"true\"}}
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	21                  
@@ -429,7 +429,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"year\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"year\":\"true\"}}
 	numFiles            	2                   
 	numRows             	4                   
 	rawDataSize         	28                  
@@ -552,7 +552,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"month\":\"true\",\"year\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"month\":\"true\",\"year\":\"true\"}}
 	numFiles            	2                   
 	numRows             	4                   
 	rawDataSize         	28                  
@@ -737,7 +737,7 @@ Database:           	default
 Table:              	calendarp           	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"year\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"year\":\"true\"}}
 	numFiles            	1                   
 	numRows             	3                   
 	rawDataSize         	12                  

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
index 179bc66..48e7a40 100644
--- a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
+++ b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out
@@ -81,7 +81,7 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"year\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"year\":\"true\"}}
 	numFiles            	2                   
 	numRows             	3                   
 	rawDataSize         	24                  

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
index 2aa8d77..f90cdb6 100644
--- a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
+++ b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out
@@ -60,7 +60,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -80,7 +80,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -104,7 +104,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -124,7 +124,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
index 8d6c117..afd6d7c 100644
--- a/ql/src/test/results/clientpositive/ctas.q.out
+++ b/ql/src/test/results/clientpositive/ctas.q.out
@@ -731,7 +731,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -751,7 +751,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/describe_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/describe_table.q.out b/ql/src/test/results/clientpositive/describe_table.q.out
index 19664b0..ad3bfc1 100644
--- a/ql/src/test/results/clientpositive/describe_table.q.out
+++ b/ql/src/test/results/clientpositive/describe_table.q.out
@@ -230,7 +230,7 @@ Database:           	default
 Table:              	srcpart             	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                
@@ -315,7 +315,7 @@ Database:           	default
 Table:              	srcpart             	 
 #### A masked pattern was here ####
 Partition Parameters:	 	 
-	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"},\"BASIC_STATS\":\"true\"}
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
 	numFiles            	1                   
 	numRows             	500                 
 	rawDataSize         	5312                

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out b/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out
index c9aed0d..ba7c640 100644
--- a/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/disable_merge_for_bucketing.q.out
@@ -48,7 +48,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -68,7 +68,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
index fa2c77e..8f40040 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
@@ -104,7 +104,7 @@ STAGE PLANS:
             partition values:
               year 2000
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -147,7 +147,7 @@ STAGE PLANS:
             partition values:
               year 2001
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -217,7 +217,7 @@ STAGE PLANS:
             partition values:
               year 2000
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -260,7 +260,7 @@ STAGE PLANS:
             partition values:
               year 2001
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -400,7 +400,7 @@ STAGE PLANS:
               year 2000
               zip 94086
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -444,7 +444,7 @@ STAGE PLANS:
               year 2001
               zip 94086
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -488,7 +488,7 @@ STAGE PLANS:
               year 2000
               zip 94087
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -532,7 +532,7 @@ STAGE PLANS:
               year 2001
               zip 94087
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -599,7 +599,7 @@ STAGE PLANS:
               year 2000
               zip 94086
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -643,7 +643,7 @@ STAGE PLANS:
               year 2001
               zip 94086
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -687,7 +687,7 @@ STAGE PLANS:
               year 2000
               zip 94087
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -731,7 +731,7 @@ STAGE PLANS:
               year 2001
               zip 94087
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
index 68652e9..0acfe90 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
@@ -164,7 +164,7 @@ STAGE PLANS:
             partition values:
               year 2001
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -207,7 +207,7 @@ STAGE PLANS:
             partition values:
               year 2002
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -363,7 +363,7 @@ STAGE PLANS:
             partition values:
               year 2001
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -406,7 +406,7 @@ STAGE PLANS:
             partition values:
               year 2002
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -535,7 +535,7 @@ STAGE PLANS:
             partition values:
               year 2000
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -578,7 +578,7 @@ STAGE PLANS:
             partition values:
               year 2001
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -621,7 +621,7 @@ STAGE PLANS:
             partition values:
               year 2002
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -664,7 +664,7 @@ STAGE PLANS:
             partition values:
               year 2003
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -730,7 +730,7 @@ STAGE PLANS:
             partition values:
               year 2000
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -773,7 +773,7 @@ STAGE PLANS:
             partition values:
               year 2001
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -816,7 +816,7 @@ STAGE PLANS:
             partition values:
               year 2002
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -859,7 +859,7 @@ STAGE PLANS:
             partition values:
               year 2003
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true"}}
               bucket_count -1
               columns state,locid,zip
               columns.comments 
@@ -1176,7 +1176,7 @@ STAGE PLANS:
               year 2001
               zip 94086
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -1396,7 +1396,7 @@ STAGE PLANS:
               year 2002
               zip 94087
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -1683,7 +1683,7 @@ STAGE PLANS:
               year 2001
               zip 94086
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 
@@ -1903,7 +1903,7 @@ STAGE PLANS:
               year 2002
               zip 94087
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true"}}
               bucket_count -1
               columns state,locid
               columns.comments 

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out
index e82136f..70e4db3 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial_ndv.q.out
@@ -232,7 +232,7 @@ STAGE PLANS:
             partition values:
               year 2001
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"}}
               bucket_count -1
               columns state,locid,cnt,zip
               columns.comments 
@@ -275,7 +275,7 @@ STAGE PLANS:
             partition values:
               year 2002
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"}}
               bucket_count -1
               columns state,locid,cnt,zip
               columns.comments 
@@ -476,7 +476,7 @@ STAGE PLANS:
             partition values:
               year 2000
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"}}
               bucket_count -1
               columns state,locid,cnt,zip
               columns.comments 
@@ -519,7 +519,7 @@ STAGE PLANS:
             partition values:
               year 2001
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"}}
               bucket_count -1
               columns state,locid,cnt,zip
               columns.comments 
@@ -562,7 +562,7 @@ STAGE PLANS:
             partition values:
               year 2002
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"}}
               bucket_count -1
               columns state,locid,cnt,zip
               columns.comments 
@@ -605,7 +605,7 @@ STAGE PLANS:
             partition values:
               year 2003
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true","cnt":"true","zip":"true"}}
               bucket_count -1
               columns state,locid,cnt,zip
               columns.comments 
@@ -993,7 +993,7 @@ STAGE PLANS:
               year 2001
               zip 94086
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true","cnt":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true","cnt":"true"}}
               bucket_count -1
               columns state,locid,cnt
               columns.comments 
@@ -1213,7 +1213,7 @@ STAGE PLANS:
               year 2002
               zip 94087
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"state":"true","locid":"true","cnt":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"state":"true","locid":"true","cnt":"true"}}
               bucket_count -1
               columns state,locid,cnt
               columns.comments 

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
index 45dbc36..048ab96 100644
--- a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
@@ -71,7 +71,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -91,7 +91,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -118,7 +118,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -164,7 +164,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -210,7 +210,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -256,7 +256,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -458,7 +458,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -478,7 +478,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -505,7 +505,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -551,7 +551,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -597,7 +597,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -643,7 +643,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -857,7 +857,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -877,7 +877,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -904,7 +904,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -950,7 +950,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1149,7 +1149,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1169,7 +1169,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1196,7 +1196,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1242,7 +1242,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
index 84999f2..24bf7a6 100644
--- a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
+++ b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -114,7 +114,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
index 5cf8bb1..c3cb7fb 100644
--- a/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -114,7 +114,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/groupby_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_ppr.q.out b/ql/src/test/results/clientpositive/groupby_ppr.q.out
index a15b557..a4e9ff3 100644
--- a/ql/src/test/results/clientpositive/groupby_ppr.q.out
+++ b/ql/src/test/results/clientpositive/groupby_ppr.q.out
@@ -61,7 +61,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -107,7 +107,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
index 117b2cd..33d1ed0 100644
--- a/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
@@ -61,7 +61,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -107,7 +107,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/input23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input23.q.out b/ql/src/test/results/clientpositive/input23.q.out
index e03c9e7..25225d7 100644
--- a/ql/src/test/results/clientpositive/input23.q.out
+++ b/ql/src/test/results/clientpositive/input23.q.out
@@ -59,7 +59,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/input42.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input42.q.out b/ql/src/test/results/clientpositive/input42.q.out
index 866468d..8e91af0 100644
--- a/ql/src/test/results/clientpositive/input42.q.out
+++ b/ql/src/test/results/clientpositive/input42.q.out
@@ -23,7 +23,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -67,7 +67,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1148,7 +1148,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1192,7 +1192,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1655,7 +1655,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1699,7 +1699,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/input_part1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part1.q.out b/ql/src/test/results/clientpositive/input_part1.q.out
index d3efb0d..16c450b 100644
--- a/ql/src/test/results/clientpositive/input_part1.q.out
+++ b/ql/src/test/results/clientpositive/input_part1.q.out
@@ -83,7 +83,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/input_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part2.q.out b/ql/src/test/results/clientpositive/input_part2.q.out
index 74db456..0c069a5 100644
--- a/ql/src/test/results/clientpositive/input_part2.q.out
+++ b/ql/src/test/results/clientpositive/input_part2.q.out
@@ -143,7 +143,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -189,7 +189,7 @@ STAGE PLANS:
               ds 2008-04-09
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/input_part7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part7.q.out b/ql/src/test/results/clientpositive/input_part7.q.out
index 1429080..459e384 100644
--- a/ql/src/test/results/clientpositive/input_part7.q.out
+++ b/ql/src/test/results/clientpositive/input_part7.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -130,7 +130,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/input_part9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_part9.q.out b/ql/src/test/results/clientpositive/input_part9.q.out
index 920096e..f73d0e1 100644
--- a/ql/src/test/results/clientpositive/input_part9.q.out
+++ b/ql/src/test/results/clientpositive/input_part9.q.out
@@ -23,7 +23,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -67,7 +67,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 12
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join17.q.out b/ql/src/test/results/clientpositive/join17.q.out
index 2c03584..a827c67 100644
--- a/ql/src/test/results/clientpositive/join17.q.out
+++ b/ql/src/test/results/clientpositive/join17.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -98,7 +98,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join26.q.out b/ql/src/test/results/clientpositive/join26.q.out
index 86e51fb..781c0e5 100644
--- a/ql/src/test/results/clientpositive/join26.q.out
+++ b/ql/src/test/results/clientpositive/join26.q.out
@@ -146,7 +146,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out
index 8653c2f..bebb007 100644
--- a/ql/src/test/results/clientpositive/join32.q.out
+++ b/ql/src/test/results/clientpositive/join32.q.out
@@ -159,7 +159,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -179,7 +179,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -203,7 +203,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -223,7 +223,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -250,7 +250,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'

http://git-wip-us.apache.org/repos/asf/hive/blob/bd651756/ql/src/test/results/clientpositive/join32_lessSize.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join32_lessSize.q.out b/ql/src/test/results/clientpositive/join32_lessSize.q.out
index fd7bba7..357a84f 100644
--- a/ql/src/test/results/clientpositive/join32_lessSize.q.out
+++ b/ql/src/test/results/clientpositive/join32_lessSize.q.out
@@ -121,7 +121,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -141,7 +141,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -168,7 +168,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -314,7 +314,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -334,7 +334,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -605,7 +605,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -625,7 +625,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -734,7 +734,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -754,7 +754,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -881,7 +881,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -901,7 +901,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1170,7 +1170,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1190,7 +1190,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1214,7 +1214,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1234,7 +1234,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1270,7 +1270,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1410,7 +1410,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1696,7 +1696,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1716,7 +1716,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1740,7 +1740,7 @@ STAGE PLANS:
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'
@@ -1760,7 +1760,7 @@ STAGE PLANS:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                 bucket_count -1
                 columns key,value
                 columns.comments 'default','default'
@@ -1796,7 +1796,7 @@ STAGE PLANS:
                     ds 2008-04-08
                     hr 11
                   properties:
-                    COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+                    COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
                     bucket_count -1
                     columns key,value
                     columns.comments 'default','default'
@@ -1936,7 +1936,7 @@ STAGE PLANS:
               ds 2008-04-08
               hr 11
             properties:
-              COLUMN_STATS_ACCURATE {"COLUMN_STATS":{"key":"true","value":"true"},"BASIC_STATS":"true"}
+              COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"key":"true","value":"true"}}
               bucket_count -1
               columns key,value
               columns.comments 'default','default'


[09/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
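
In the query75 plan diff below, the literal year is no longer folded into the operator keys: the old plan groups on 2001, _col1.._col4, while the new one keeps plain column keys _col0.._col3 and renumbers the aggregate columns accordingly. That is the intended effect of HIVE-13068: with CBO having already optimized the plan, Hive's own ConstantPropagate pass is skipped, so a column pinned to a constant by a filter (d_year = 2001) stays a column reference in downstream group-by and join keys. A toy Java sketch (a hypothetical helper, not Hive's ConstantPropagate implementation) of the substitution the now-disabled pass used to perform:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ConstantPropagateSketch {
    // Rewrite each key: if a filter has pinned the column to a literal,
    // substitute the literal, otherwise keep the column reference.
    static List<String> propagate(List<String> keys, Map<String, String> pinned) {
        return keys.stream()
                   .map(k -> pinned.getOrDefault(k, k))
                   .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> gbyKeys =
            Arrays.asList("d_year", "i_brand_id", "i_class_id", "i_category_id");
        Map<String, String> pinnedByFilter = new HashMap<>();
        pinnedByFilter.put("d_year", "2001");   // from: WHERE d_year = 2001

        // Old plan shape: constant folded into the keys -> [2001, i_brand_id, ...]
        System.out.println(propagate(gbyKeys, pinnedByFilter));
        // New plan shape (pass skipped after CBO): plain column keys survive.
        System.out.println(gbyKeys);
    }
}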
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query75.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query75.q.out b/ql/src/test/results/clientpositive/perf/query75.q.out
index e44a48e..802c1af 100644
--- a/ql/src/test/results/clientpositive/perf/query75.q.out
+++ b/ql/src/test/results/clientpositive/perf/query75.q.out
@@ -41,367 +41,351 @@ Stage-0
           <-Reducer 7 [SIMPLE_EDGE]
             SHUFFLE [RS_153]
               Select Operator [SEL_152] (rows=245965926 width=108)
-                Output:["_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
+                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
                 Filter Operator [FIL_151] (rows=245965926 width=108)
-                  predicate:(UDFToDouble((CAST( _col5 AS decimal(17,2)) / CAST( _col12 AS decimal(17,2)))) < 0.9)
+                  predicate:(UDFToDouble((CAST( _col4 AS decimal(17,2)) / CAST( _col10 AS decimal(17,2)))) < 0.9)
                   Merge Join Operator [MERGEJOIN_259] (rows=737897778 width=108)
-                    Conds:RS_148._col1, _col2, _col3, _col4=RS_149._col1, _col2, _col3, _col4(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col12","_col13"]
+                    Conds:RS_148._col0, _col1, _col2, _col3=RS_149._col0, _col1, _col2, _col3(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col10","_col11"]
                   <-Reducer 31 [SIMPLE_EDGE]
                     SHUFFLE [RS_149]
-                      PartitionCols:_col1, _col2, _col3, _col4
-                      Select Operator [SEL_147] (rows=670816148 width=108)
-                        Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                        Group By Operator [GBY_146] (rows=670816148 width=108)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:2001, KEY._col1, KEY._col2, KEY._col3, KEY._col4
-                        <-Union 30 [SIMPLE_EDGE]
-                          <-Reducer 29 [CONTAINS]
-                            Reduce Output Operator [RS_145]
-                              PartitionCols:2001, _col1, _col2, _col3, _col4
-                              Group By Operator [GBY_144] (rows=1341632296 width=108)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2001, _col1, _col2, _col3, _col4
-                                Select Operator [SEL_142] (rows=1341632296 width=108)
-                                  Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                                  Select Operator [SEL_95] (rows=383314495 width=135)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                    Merge Join Operator [MERGEJOIN_252] (rows=383314495 width=135)
-                                      Conds:RS_92._col2, _col1=RS_93._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
-                                    <-Map 34 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_93]
-                                        PartitionCols:_col1, _col0
-                                        Select Operator [SEL_85] (rows=28798881 width=106)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_232] (rows=28798881 width=106)
-                                            predicate:cr_item_sk is not null
-                                            TableScan [TS_83] (rows=28798881 width=106)
-                                              default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
-                                    <-Reducer 28 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_92]
-                                        PartitionCols:_col2, _col1
-                                        Merge Join Operator [MERGEJOIN_251] (rows=348467716 width=135)
-                                          Conds:RS_89._col1=RS_90._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
-                                        <-Map 33 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_90]
+                      PartitionCols:_col0, _col1, _col2, _col3
+                      Group By Operator [GBY_146] (rows=670816148 width=108)
+                        Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3
+                      <-Union 30 [SIMPLE_EDGE]
+                        <-Reducer 29 [CONTAINS]
+                          Reduce Output Operator [RS_145]
+                            PartitionCols:_col0, _col1, _col2, _col3
+                            Group By Operator [GBY_144] (rows=1341632296 width=108)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3
+                              Select Operator [SEL_95] (rows=383314495 width=135)
+                                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                Merge Join Operator [MERGEJOIN_252] (rows=383314495 width=135)
+                                  Conds:RS_92._col2, _col1=RS_93._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
+                                <-Map 34 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_93]
+                                    PartitionCols:_col1, _col0
+                                    Select Operator [SEL_85] (rows=28798881 width=106)
+                                      Output:["_col0","_col1","_col2","_col3"]
+                                      Filter Operator [FIL_232] (rows=28798881 width=106)
+                                        predicate:cr_item_sk is not null
+                                        TableScan [TS_83] (rows=28798881 width=106)
+                                          default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
+                                <-Reducer 28 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_92]
+                                    PartitionCols:_col2, _col1
+                                    Merge Join Operator [MERGEJOIN_251] (rows=348467716 width=135)
+                                      Conds:RS_89._col1=RS_90._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
+                                    <-Map 33 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_90]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_82] (rows=231000 width=1436)
+                                          Output:["_col0","_col1","_col2","_col3","_col5"]
+                                          Filter Operator [FIL_231] (rows=231000 width=1436)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            TableScan [TS_80] (rows=462000 width=1436)
+                                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+                                    <-Reducer 27 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_89]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_250] (rows=316788826 width=135)
+                                          Conds:RS_86._col0=RS_87._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                        <-Map 26 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_86]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_82] (rows=231000 width=1436)
-                                              Output:["_col0","_col1","_col2","_col3","_col5"]
-                                              Filter Operator [FIL_231] (rows=231000 width=1436)
-                                                predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
-                                                TableScan [TS_80] (rows=462000 width=1436)
-                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
-                                        <-Reducer 27 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_89]
-                                            PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_250] (rows=316788826 width=135)
-                                              Conds:RS_86._col0=RS_87._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                            <-Map 26 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_86]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_76] (rows=287989836 width=135)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_229] (rows=287989836 width=135)
-                                                    predicate:(cs_item_sk is not null and cs_sold_date_sk is not null)
-                                                    TableScan [TS_74] (rows=287989836 width=135)
-                                                      default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_ext_sales_price"]
-                                            <-Map 32 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_87]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_79] (rows=36524 width=1119)
-                                                  Output:["_col0"]
-                                                  Filter Operator [FIL_230] (rows=36524 width=1119)
-                                                    predicate:((d_year = 2001) and d_date_sk is not null)
-                                                    TableScan [TS_77] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                          <-Reducer 38 [CONTAINS]
-                            Reduce Output Operator [RS_145]
-                              PartitionCols:2001, _col1, _col2, _col3, _col4
-                              Group By Operator [GBY_144] (rows=1341632296 width=108)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2001, _col1, _col2, _col3, _col4
-                                Select Operator [SEL_142] (rows=1341632296 width=108)
-                                  Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                                  Select Operator [SEL_117] (rows=766650239 width=88)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                    Merge Join Operator [MERGEJOIN_255] (rows=766650239 width=88)
-                                      Conds:RS_114._col2, _col1=RS_115._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
-                                    <-Map 41 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_115]
-                                        PartitionCols:_col1, _col0
-                                        Select Operator [SEL_107] (rows=57591150 width=77)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_236] (rows=57591150 width=77)
-                                            predicate:sr_item_sk is not null
-                                            TableScan [TS_105] (rows=57591150 width=77)
-                                              default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
-                                    <-Reducer 37 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_114]
-                                        PartitionCols:_col2, _col1
-                                        Merge Join Operator [MERGEJOIN_254] (rows=696954748 width=88)
-                                          Conds:RS_111._col1=RS_112._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
-                                        <-Map 40 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_112]
+                                            Select Operator [SEL_76] (rows=287989836 width=135)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                              Filter Operator [FIL_229] (rows=287989836 width=135)
+                                                predicate:(cs_item_sk is not null and cs_sold_date_sk is not null)
+                                                TableScan [TS_74] (rows=287989836 width=135)
+                                                  default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_ext_sales_price"]
+                                        <-Map 32 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_87]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_104] (rows=231000 width=1436)
-                                              Output:["_col0","_col1","_col2","_col3","_col5"]
-                                              Filter Operator [FIL_235] (rows=231000 width=1436)
-                                                predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
-                                                TableScan [TS_102] (rows=462000 width=1436)
-                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
-                                        <-Reducer 36 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_111]
-                                            PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_253] (rows=633595212 width=88)
-                                              Conds:RS_108._col0=RS_109._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                            <-Map 35 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_108]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_98] (rows=575995635 width=88)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_233] (rows=575995635 width=88)
-                                                    predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
-                                                    TableScan [TS_96] (rows=575995635 width=88)
-                                                      default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_ext_sales_price"]
-                                            <-Map 39 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_109]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_101] (rows=36524 width=1119)
-                                                  Output:["_col0"]
-                                                  Filter Operator [FIL_234] (rows=36524 width=1119)
-                                                    predicate:((d_year = 2001) and d_date_sk is not null)
-                                                    TableScan [TS_99] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                          <-Reducer 45 [CONTAINS]
-                            Reduce Output Operator [RS_145]
-                              PartitionCols:2001, _col1, _col2, _col3, _col4
-                              Group By Operator [GBY_144] (rows=1341632296 width=108)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2001, _col1, _col2, _col3, _col4
-                                Select Operator [SEL_142] (rows=1341632296 width=108)
-                                  Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                                  Select Operator [SEL_141] (rows=191667562 width=135)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                    Merge Join Operator [MERGEJOIN_258] (rows=191667562 width=135)
-                                      Conds:RS_138._col2, _col1=RS_139._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
-                                    <-Map 48 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_139]
-                                        PartitionCols:_col1, _col0
-                                        Select Operator [SEL_131] (rows=14398467 width=92)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_240] (rows=14398467 width=92)
-                                            predicate:wr_item_sk is not null
-                                            TableScan [TS_129] (rows=14398467 width=92)
-                                              default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
-                                    <-Reducer 44 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_138]
-                                        PartitionCols:_col2, _col1
-                                        Merge Join Operator [MERGEJOIN_257] (rows=174243235 width=135)
-                                          Conds:RS_135._col1=RS_136._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
-                                        <-Map 47 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_136]
+                                            Select Operator [SEL_79] (rows=36524 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_230] (rows=36524 width=1119)
+                                                predicate:((d_year = 2001) and d_date_sk is not null)
+                                                TableScan [TS_77] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                        <-Reducer 38 [CONTAINS]
+                          Reduce Output Operator [RS_145]
+                            PartitionCols:_col0, _col1, _col2, _col3
+                            Group By Operator [GBY_144] (rows=1341632296 width=108)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3
+                              Select Operator [SEL_117] (rows=766650239 width=88)
+                                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                Merge Join Operator [MERGEJOIN_255] (rows=766650239 width=88)
+                                  Conds:RS_114._col2, _col1=RS_115._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
+                                <-Map 41 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_115]
+                                    PartitionCols:_col1, _col0
+                                    Select Operator [SEL_107] (rows=57591150 width=77)
+                                      Output:["_col0","_col1","_col2","_col3"]
+                                      Filter Operator [FIL_236] (rows=57591150 width=77)
+                                        predicate:sr_item_sk is not null
+                                        TableScan [TS_105] (rows=57591150 width=77)
+                                          default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
+                                <-Reducer 37 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_114]
+                                    PartitionCols:_col2, _col1
+                                    Merge Join Operator [MERGEJOIN_254] (rows=696954748 width=88)
+                                      Conds:RS_111._col1=RS_112._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
+                                    <-Map 40 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_112]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_104] (rows=231000 width=1436)
+                                          Output:["_col0","_col1","_col2","_col3","_col5"]
+                                          Filter Operator [FIL_235] (rows=231000 width=1436)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            TableScan [TS_102] (rows=462000 width=1436)
+                                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+                                    <-Reducer 36 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_111]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_253] (rows=633595212 width=88)
+                                          Conds:RS_108._col0=RS_109._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                        <-Map 35 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_108]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_128] (rows=231000 width=1436)
-                                              Output:["_col0","_col1","_col2","_col3","_col5"]
-                                              Filter Operator [FIL_239] (rows=231000 width=1436)
-                                                predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
-                                                TableScan [TS_126] (rows=462000 width=1436)
-                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
-                                        <-Reducer 43 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_135]
-                                            PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_256] (rows=158402938 width=135)
-                                              Conds:RS_132._col0=RS_133._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                            <-Map 42 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_132]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_122] (rows=144002668 width=135)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_237] (rows=144002668 width=135)
-                                                    predicate:(ws_item_sk is not null and ws_sold_date_sk is not null)
-                                                    TableScan [TS_120] (rows=144002668 width=135)
-                                                      default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_ext_sales_price"]
-                                            <-Map 46 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_133]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_125] (rows=36524 width=1119)
-                                                  Output:["_col0"]
-                                                  Filter Operator [FIL_238] (rows=36524 width=1119)
-                                                    predicate:((d_year = 2001) and d_date_sk is not null)
-                                                    TableScan [TS_123] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                                            Select Operator [SEL_98] (rows=575995635 width=88)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                              Filter Operator [FIL_233] (rows=575995635 width=88)
+                                                predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
+                                                TableScan [TS_96] (rows=575995635 width=88)
+                                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_ext_sales_price"]
+                                        <-Map 39 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_109]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_101] (rows=36524 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_234] (rows=36524 width=1119)
+                                                predicate:((d_year = 2001) and d_date_sk is not null)
+                                                TableScan [TS_99] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                        <-Reducer 45 [CONTAINS]
+                          Reduce Output Operator [RS_145]
+                            PartitionCols:_col0, _col1, _col2, _col3
+                            Group By Operator [GBY_144] (rows=1341632296 width=108)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3
+                              Select Operator [SEL_141] (rows=191667562 width=135)
+                                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                Merge Join Operator [MERGEJOIN_258] (rows=191667562 width=135)
+                                  Conds:RS_138._col2, _col1=RS_139._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
+                                <-Map 48 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_139]
+                                    PartitionCols:_col1, _col0
+                                    Select Operator [SEL_131] (rows=14398467 width=92)
+                                      Output:["_col0","_col1","_col2","_col3"]
+                                      Filter Operator [FIL_240] (rows=14398467 width=92)
+                                        predicate:wr_item_sk is not null
+                                        TableScan [TS_129] (rows=14398467 width=92)
+                                          default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
+                                <-Reducer 44 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_138]
+                                    PartitionCols:_col2, _col1
+                                    Merge Join Operator [MERGEJOIN_257] (rows=174243235 width=135)
+                                      Conds:RS_135._col1=RS_136._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
+                                    <-Map 47 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_136]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_128] (rows=231000 width=1436)
+                                          Output:["_col0","_col1","_col2","_col3","_col5"]
+                                          Filter Operator [FIL_239] (rows=231000 width=1436)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            TableScan [TS_126] (rows=462000 width=1436)
+                                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+                                    <-Reducer 43 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_135]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_256] (rows=158402938 width=135)
+                                          Conds:RS_132._col0=RS_133._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                        <-Map 42 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_132]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_122] (rows=144002668 width=135)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                              Filter Operator [FIL_237] (rows=144002668 width=135)
+                                                predicate:(ws_item_sk is not null and ws_sold_date_sk is not null)
+                                                TableScan [TS_120] (rows=144002668 width=135)
+                                                  default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_ext_sales_price"]
+                                        <-Map 46 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_133]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_125] (rows=36524 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_238] (rows=36524 width=1119)
+                                                predicate:((d_year = 2001) and d_date_sk is not null)
+                                                TableScan [TS_123] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
                   <-Reducer 6 [SIMPLE_EDGE]
                     SHUFFLE [RS_148]
-                      PartitionCols:_col1, _col2, _col3, _col4
-                      Select Operator [SEL_73] (rows=670816148 width=108)
-                        Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                        Group By Operator [GBY_72] (rows=670816148 width=108)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:2002, KEY._col1, KEY._col2, KEY._col3, KEY._col4
-                        <-Union 5 [SIMPLE_EDGE]
-                          <-Reducer 15 [CONTAINS]
-                            Reduce Output Operator [RS_71]
-                              PartitionCols:2002, _col1, _col2, _col3, _col4
-                              Group By Operator [GBY_70] (rows=1341632296 width=108)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2002, _col1, _col2, _col3, _col4
-                                Select Operator [SEL_68] (rows=1341632296 width=108)
-                                  Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                                  Select Operator [SEL_43] (rows=766650239 width=88)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                    Merge Join Operator [MERGEJOIN_246] (rows=766650239 width=88)
-                                      Conds:RS_40._col2, _col1=RS_41._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
-                                    <-Map 18 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_41]
-                                        PartitionCols:_col1, _col0
-                                        Select Operator [SEL_33] (rows=57591150 width=77)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_224] (rows=57591150 width=77)
-                                            predicate:sr_item_sk is not null
-                                            TableScan [TS_31] (rows=57591150 width=77)
-                                              default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
-                                    <-Reducer 14 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_40]
-                                        PartitionCols:_col2, _col1
-                                        Merge Join Operator [MERGEJOIN_245] (rows=696954748 width=88)
-                                          Conds:RS_37._col1=RS_38._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
-                                        <-Map 17 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_38]
+                      PartitionCols:_col0, _col1, _col2, _col3
+                      Group By Operator [GBY_72] (rows=670816148 width=108)
+                        Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3
+                      <-Union 5 [SIMPLE_EDGE]
+                        <-Reducer 15 [CONTAINS]
+                          Reduce Output Operator [RS_71]
+                            PartitionCols:_col0, _col1, _col2, _col3
+                            Group By Operator [GBY_70] (rows=1341632296 width=108)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3
+                              Select Operator [SEL_43] (rows=766650239 width=88)
+                                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                Merge Join Operator [MERGEJOIN_246] (rows=766650239 width=88)
+                                  Conds:RS_40._col2, _col1=RS_41._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
+                                <-Map 18 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_41]
+                                    PartitionCols:_col1, _col0
+                                    Select Operator [SEL_33] (rows=57591150 width=77)
+                                      Output:["_col0","_col1","_col2","_col3"]
+                                      Filter Operator [FIL_224] (rows=57591150 width=77)
+                                        predicate:sr_item_sk is not null
+                                        TableScan [TS_31] (rows=57591150 width=77)
+                                          default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_quantity","sr_return_amt"]
+                                <-Reducer 14 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_40]
+                                    PartitionCols:_col2, _col1
+                                    Merge Join Operator [MERGEJOIN_245] (rows=696954748 width=88)
+                                      Conds:RS_37._col1=RS_38._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
+                                    <-Map 17 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_38]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_30] (rows=231000 width=1436)
+                                          Output:["_col0","_col1","_col2","_col3","_col5"]
+                                          Filter Operator [FIL_223] (rows=231000 width=1436)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            TableScan [TS_28] (rows=462000 width=1436)
+                                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+                                    <-Reducer 13 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_37]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_244] (rows=633595212 width=88)
+                                          Conds:RS_34._col0=RS_35._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                        <-Map 12 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_34]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_24] (rows=575995635 width=88)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                              Filter Operator [FIL_221] (rows=575995635 width=88)
+                                                predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
+                                                TableScan [TS_22] (rows=575995635 width=88)
+                                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_ext_sales_price"]
+                                        <-Map 16 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_35]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_27] (rows=36524 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_222] (rows=36524 width=1119)
+                                                predicate:((d_year = 2002) and d_date_sk is not null)
+                                                TableScan [TS_25] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                        <-Reducer 22 [CONTAINS]
+                          Reduce Output Operator [RS_71]
+                            PartitionCols:_col0, _col1, _col2, _col3
+                            Group By Operator [GBY_70] (rows=1341632296 width=108)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3
+                              Select Operator [SEL_67] (rows=191667562 width=135)
+                                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                Merge Join Operator [MERGEJOIN_249] (rows=191667562 width=135)
+                                  Conds:RS_64._col2, _col1=RS_65._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
+                                <-Map 25 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_65]
+                                    PartitionCols:_col1, _col0
+                                    Select Operator [SEL_57] (rows=14398467 width=92)
+                                      Output:["_col0","_col1","_col2","_col3"]
+                                      Filter Operator [FIL_228] (rows=14398467 width=92)
+                                        predicate:wr_item_sk is not null
+                                        TableScan [TS_55] (rows=14398467 width=92)
+                                          default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
+                                <-Reducer 21 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_64]
+                                    PartitionCols:_col2, _col1
+                                    Merge Join Operator [MERGEJOIN_248] (rows=174243235 width=135)
+                                      Conds:RS_61._col1=RS_62._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
+                                    <-Map 24 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_62]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_54] (rows=231000 width=1436)
+                                          Output:["_col0","_col1","_col2","_col3","_col5"]
+                                          Filter Operator [FIL_227] (rows=231000 width=1436)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            TableScan [TS_52] (rows=462000 width=1436)
+                                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+                                    <-Reducer 20 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_61]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_247] (rows=158402938 width=135)
+                                          Conds:RS_58._col0=RS_59._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                        <-Map 19 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_58]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_48] (rows=144002668 width=135)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                              Filter Operator [FIL_225] (rows=144002668 width=135)
+                                                predicate:(ws_item_sk is not null and ws_sold_date_sk is not null)
+                                                TableScan [TS_46] (rows=144002668 width=135)
+                                                  default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_ext_sales_price"]
+                                        <-Map 23 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_59]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_30] (rows=231000 width=1436)
-                                              Output:["_col0","_col1","_col2","_col3","_col5"]
-                                              Filter Operator [FIL_223] (rows=231000 width=1436)
-                                                predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
-                                                TableScan [TS_28] (rows=462000 width=1436)
-                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
-                                        <-Reducer 13 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_37]
-                                            PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_244] (rows=633595212 width=88)
-                                              Conds:RS_34._col0=RS_35._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                            <-Map 12 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_34]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_24] (rows=575995635 width=88)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_221] (rows=575995635 width=88)
-                                                    predicate:(ss_item_sk is not null and ss_sold_date_sk is not null)
-                                                    TableScan [TS_22] (rows=575995635 width=88)
-                                                      default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ticket_number","ss_quantity","ss_ext_sales_price"]
-                                            <-Map 16 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_35]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_27] (rows=36524 width=1119)
-                                                  Output:["_col0"]
-                                                  Filter Operator [FIL_222] (rows=36524 width=1119)
-                                                    predicate:((d_year = 2002) and d_date_sk is not null)
-                                                    TableScan [TS_25] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                          <-Reducer 22 [CONTAINS]
-                            Reduce Output Operator [RS_71]
-                              PartitionCols:2002, _col1, _col2, _col3, _col4
-                              Group By Operator [GBY_70] (rows=1341632296 width=108)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2002, _col1, _col2, _col3, _col4
-                                Select Operator [SEL_68] (rows=1341632296 width=108)
-                                  Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                                  Select Operator [SEL_67] (rows=191667562 width=135)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                    Merge Join Operator [MERGEJOIN_249] (rows=191667562 width=135)
-                                      Conds:RS_64._col2, _col1=RS_65._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
-                                    <-Map 25 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_65]
-                                        PartitionCols:_col1, _col0
-                                        Select Operator [SEL_57] (rows=14398467 width=92)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_228] (rows=14398467 width=92)
-                                            predicate:wr_item_sk is not null
-                                            TableScan [TS_55] (rows=14398467 width=92)
-                                              default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_quantity","wr_return_amt"]
-                                    <-Reducer 21 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_64]
-                                        PartitionCols:_col2, _col1
-                                        Merge Join Operator [MERGEJOIN_248] (rows=174243235 width=135)
-                                          Conds:RS_61._col1=RS_62._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
-                                        <-Map 24 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_62]
+                                            Select Operator [SEL_51] (rows=36524 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_226] (rows=36524 width=1119)
+                                                predicate:((d_year = 2002) and d_date_sk is not null)
+                                                TableScan [TS_49] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                        <-Reducer 4 [CONTAINS]
+                          Reduce Output Operator [RS_71]
+                            PartitionCols:_col0, _col1, _col2, _col3
+                            Group By Operator [GBY_70] (rows=1341632296 width=108)
+                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col0, _col1, _col2, _col3
+                              Select Operator [SEL_21] (rows=383314495 width=135)
+                                Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                                Merge Join Operator [MERGEJOIN_243] (rows=383314495 width=135)
+                                  Conds:RS_18._col2, _col1=RS_19._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
+                                <-Map 11 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_19]
+                                    PartitionCols:_col1, _col0
+                                    Select Operator [SEL_11] (rows=28798881 width=106)
+                                      Output:["_col0","_col1","_col2","_col3"]
+                                      Filter Operator [FIL_220] (rows=28798881 width=106)
+                                        predicate:cr_item_sk is not null
+                                        TableScan [TS_9] (rows=28798881 width=106)
+                                          default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
+                                <-Reducer 3 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_18]
+                                    PartitionCols:_col2, _col1
+                                    Merge Join Operator [MERGEJOIN_242] (rows=348467716 width=135)
+                                      Conds:RS_15._col1=RS_16._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
+                                    <-Map 10 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_16]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_8] (rows=231000 width=1436)
+                                          Output:["_col0","_col1","_col2","_col3","_col5"]
+                                          Filter Operator [FIL_219] (rows=231000 width=1436)
+                                            predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null)
+                                            TableScan [TS_6] (rows=462000 width=1436)
+                                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
+                                    <-Reducer 2 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_15]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_241] (rows=316788826 width=135)
+                                          Conds:RS_12._col0=RS_13._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                        <-Map 1 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_12]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_54] (rows=231000 width=1436)
-                                              Output:["_col0","_col1","_col2","_col3","_col5"]
-                                              Filter Operator [FIL_227] (rows=231000 width=1436)
-                                                predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
-                                                TableScan [TS_52] (rows=462000 width=1436)
-                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
-                                        <-Reducer 20 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_61]
-                                            PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_247] (rows=158402938 width=135)
-                                              Conds:RS_58._col0=RS_59._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                            <-Map 19 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_58]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_48] (rows=144002668 width=135)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_225] (rows=144002668 width=135)
-                                                    predicate:(ws_item_sk is not null and ws_sold_date_sk is not null)
-                                                    TableScan [TS_46] (rows=144002668 width=135)
-                                                      default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_order_number","ws_quantity","ws_ext_sales_price"]
-                                            <-Map 23 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_59]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_51] (rows=36524 width=1119)
-                                                  Output:["_col0"]
-                                                  Filter Operator [FIL_226] (rows=36524 width=1119)
-                                                    predicate:((d_year = 2002) and d_date_sk is not null)
-                                                    TableScan [TS_49] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                          <-Reducer 4 [CONTAINS]
-                            Reduce Output Operator [RS_71]
-                              PartitionCols:2002, _col1, _col2, _col3, _col4
-                              Group By Operator [GBY_70] (rows=1341632296 width=108)
-                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col6)"],keys:2002, _col1, _col2, _col3, _col4
-                                Select Operator [SEL_68] (rows=1341632296 width=108)
-                                  Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
-                                  Select Operator [SEL_21] (rows=383314495 width=135)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                    Merge Join Operator [MERGEJOIN_243] (rows=383314495 width=135)
-                                      Conds:RS_18._col2, _col1=RS_19._col1, _col0(Left Outer),Output:["_col3","_col4","_col8","_col9","_col10","_col12","_col15","_col16"]
-                                    <-Map 11 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_19]
-                                        PartitionCols:_col1, _col0
-                                        Select Operator [SEL_11] (rows=28798881 width=106)
-                                          Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_220] (rows=28798881 width=106)
-                                            predicate:cr_item_sk is not null
-                                            TableScan [TS_9] (rows=28798881 width=106)
-                                              default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_quantity","cr_return_amount"]
-                                    <-Reducer 3 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_18]
-                                        PartitionCols:_col2, _col1
-                                        Merge Join Operator [MERGEJOIN_242] (rows=348467716 width=135)
-                                          Conds:RS_15._col1=RS_16._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col8","_col9","_col10","_col12"]
-                                        <-Map 10 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_16]
+                                            Select Operator [SEL_2] (rows=287989836 width=135)
+                                              Output:["_col0","_col1","_col2","_col3","_col4"]
+                                              Filter Operator [FIL_217] (rows=287989836 width=135)
+                                                predicate:(cs_item_sk is not null and cs_sold_date_sk is not null)
+                                                TableScan [TS_0] (rows=287989836 width=135)
+                                                  default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_ext_sales_price"]
+                                        <-Map 9 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_13]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_8] (rows=231000 width=1436)
-                                              Output:["_col0","_col1","_col2","_col3","_col5"]
-                                              Filter Operator [FIL_219] (rows=231000 width=1436)
-                                                predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
-                                                TableScan [TS_6] (rows=462000 width=1436)
-                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
-                                        <-Reducer 2 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_15]
-                                            PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_241] (rows=316788826 width=135)
-                                              Conds:RS_12._col0=RS_13._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
-                                            <-Map 1 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_12]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_2] (rows=287989836 width=135)
-                                                  Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_217] (rows=287989836 width=135)
-                                                    predicate:(cs_item_sk is not null and cs_sold_date_sk is not null)
-                                                    TableScan [TS_0] (rows=287989836 width=135)
-                                                      default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_order_number","cs_quantity","cs_ext_sales_price"]
-                                            <-Map 9 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_13]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_5] (rows=36524 width=1119)
-                                                  Output:["_col0"]
-                                                  Filter Operator [FIL_218] (rows=36524 width=1119)
-                                                    predicate:((d_year = 2002) and d_date_sk is not null)
-                                                    TableScan [TS_3] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
+                                            Select Operator [SEL_5] (rows=36524 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_218] (rows=36524 width=1119)
+                                                predicate:((d_year = 2002) and d_date_sk is not null)
+                                                TableScan [TS_3] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
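For context on the hunks above: HIVE-13068 stops the constant year literal (2002) from being pinned into the group-by and partition keys once CBO has already optimized the plan, so the keys become plain column references (_col0.._col3) and the wrapper Select Operators around each union branch disappear. A minimal sketch of one union branch of a query with this plan shape, assuming TPC-DS-style table definitions (illustrative only, not taken from this commit):

  select d_year, i_brand_id, i_class_id, i_category_id,
         sum(ss_quantity - coalesce(sr_return_quantity, 0))     as sales_cnt,
         sum(ss_ext_sales_price - coalesce(sr_return_amt, 0.0)) as sales_amt
  from store_sales
  join date_dim on ss_sold_date_sk = d_date_sk
  join item     on ss_item_sk = i_item_sk
  left outer join store_returns
       on ss_ticket_number = sr_ticket_number and ss_item_sk = sr_item_sk
  where i_category = 'Sports' and d_year = 2002
  group by d_year, i_brand_id, i_class_id, i_category_id;

Because d_year is filtered to a single constant, the pre-patch plan folded 2002 into the aggregation keys; the post-patch plan leaves the column reference alone.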
 


[39/66] [abbrv] hive git commit: HIVE-13549: Remove jdk version specific out files from Hive2 (Mohit Sabharwal, reviewed by Sergio Pena)

Posted by sp...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/stats_list_bucket.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.out b/ql/src/test/results/clientpositive/stats_list_bucket.q.out
new file mode 100644
index 0000000..c34c414
--- /dev/null
+++ b/ql/src/test/results/clientpositive/stats_list_bucket.q.out
@@ -0,0 +1,189 @@
+PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+drop table stats_list_bucket
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23)
+
+drop table stats_list_bucket
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table stats_list_bucket_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table stats_list_bucket_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table stats_list_bucket (
+  c1 string,
+  c2 string
+) partitioned by (ds string, hr string)
+skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_list_bucket
+POSTHOOK: query: create table stats_list_bucket (
+  c1 string,
+  c2 string
+) partitioned by (ds string, hr string)
+skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_list_bucket
+PREHOOK: query: -- Try partitioned table with list bucketing.
+-- The stats should show 500 rows loaded, as many rows as the src table has.
+
+insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
+  select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
+POSTHOOK: query: -- Try partitioned table with list bucketing.
+-- The stats should show 500 rows loaded, as many rows as the src table has.
+
+insert overwrite table stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
+  select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11
+POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_list_bucket PARTITION(ds=2008-04-08,hr=11).c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_list_bucket
+POSTHOOK: query: desc formatted stats_list_bucket partition (ds = '2008-04-08',  hr = '11')
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_list_bucket
+# col_name            	data_type           	comment             
+	 	 
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Partition Information	 	 
+# col_name            	data_type           	comment             
+	 	 
+ds                  	string              	                    
+hr                  	string              	                    
+	 	 
+# Detailed Partition Information	 	 
+Partition Value:    	[2008-04-08, 11]    	 
+Database:           	default             	 
+Table:              	stats_list_bucket   	 
+#### A masked pattern was here ####
+Partition Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	4812                
+	totalSize           	5522                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[c1, c2]            	 
+Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[466, val_466]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=466/c2=val_466, [82, val_82]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=82/c2=val_82, [287, val_287]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=287/c2=val_287}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: -- Also try non-partitioned table with list bucketing.
+-- Stats should show the same number of rows.
+
+create table stats_list_bucket_1 (
+  c1 string,
+  c2 string
+)
+skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats_list_bucket_1
+POSTHOOK: query: -- Also try non-partitioned table with list bucketing.
+-- Stats should show the same number of rows.
+
+create table stats_list_bucket_1 (
+  c1 string,
+  c2 string
+)
+skewed by (c1, c2) on  (('466','val_466'),('287','val_287'),('82','val_82'))
+stored as directories
+stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats_list_bucket_1
+PREHOOK: query: insert overwrite table stats_list_bucket_1
+  select key, value from src
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@stats_list_bucket_1
+POSTHOOK: query: insert overwrite table stats_list_bucket_1
+  select key, value from src
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@stats_list_bucket_1
+POSTHOOK: Lineage: stats_list_bucket_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: stats_list_bucket_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: desc formatted stats_list_bucket_1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats_list_bucket_1
+POSTHOOK: query: desc formatted stats_list_bucket_1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats_list_bucket_1
+# col_name            	data_type           	comment             
+	 	 
+c1                  	string              	                    
+c2                  	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	numFiles            	4                   
+	numRows             	500                 
+	rawDataSize         	4812                
+	totalSize           	5522                
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe	 
+InputFormat:        	org.apache.hadoop.hive.ql.io.RCFileInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.RCFileOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Stored As SubDirectories:	Yes                 	 
+Skewed Columns:     	[c1, c2]            	 
+Skewed Values:      	[[466, val_466], [287, val_287], [82, val_82]]	 
+#### A masked pattern was here ####
+Skewed Value to Truncated Path:	{[466, val_466]=/stats_list_bucket_1/c1=466/c2=val_466, [287, val_287]=/stats_list_bucket_1/c1=287/c2=val_287, [82, val_82]=/stats_list_bucket_1/c1=82/c2=val_82}	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: drop table stats_list_bucket
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@stats_list_bucket
+PREHOOK: Output: default@stats_list_bucket
+POSTHOOK: query: drop table stats_list_bucket
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@stats_list_bucket
+POSTHOOK: Output: default@stats_list_bucket
+PREHOOK: query: drop table stats_list_bucket_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@stats_list_bucket_1
+PREHOOK: Output: default@stats_list_bucket_1
+POSTHOOK: query: drop table stats_list_bucket_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@stats_list_bucket_1
+POSTHOOK: Output: default@stats_list_bucket_1
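The test above only verifies partition and table statistics; a hedged sketch of how the skewed subdirectories would actually be exercised at query time (the configuration property names are assumed from the other list_bucket_* tests, not from this diff):

  set hive.mapred.supports.subdirectories=true;
  set hive.optimize.listbucketing=true;
  -- with list-bucketing pruning enabled, a point lookup on a skewed value
  -- should scan only the c1=466/c2=val_466 subdirectory of the partition
  select count(*) from stats_list_bucket
  where ds = '2008-04-08' and hr = '11' and c1 = '466' and c2 = 'val_466';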

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out b/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
deleted file mode 100644
index 652acbb..0000000
--- a/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
+++ /dev/null
@@ -1,220 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-desc function str_to_map
-PREHOOK: type: DESCFUNCTION
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-desc function str_to_map
-POSTHOOK: type: DESCFUNCTION
-str_to_map(text, delimiter1, delimiter2) - Creates a map by parsing text 
-PREHOOK: query: desc function extended str_to_map
-PREHOOK: type: DESCFUNCTION
-POSTHOOK: query: desc function extended str_to_map
-POSTHOOK: type: DESCFUNCTION
-str_to_map(text, delimiter1, delimiter2) - Creates a map by parsing text 
-Split text into key-value pairs using two delimiters. The first delimiter separates pairs, and the second delimiter separates key and value. If only one parameter is given, default delimiters are used: ',' as delimiter1 and '=' as delimiter2.
-PREHOOK: query: explain select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        TableScan
-          alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: str_to_map('a=1,b=2,c=3',',','=')['a'] (type: string)
-            outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 42500 Basic stats: COMPLETE Column stats: COMPLETE
-            Limit
-              Number of rows: 3
-              Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
-              ListSink
-
-PREHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-1
-1
-1
-PREHOOK: query: explain select str_to_map('a:1,b:2,c:3') from src limit 3
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select str_to_map('a:1,b:2,c:3') from src limit 3
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        TableScan
-          alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: str_to_map('a:1,b:2,c:3') (type: map<string,string>)
-            outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 377000 Basic stats: COMPLETE Column stats: COMPLETE
-            Limit
-              Number of rows: 3
-              Statistics: Num rows: 3 Data size: 2262 Basic stats: COMPLETE Column stats: COMPLETE
-              ListSink
-
-PREHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-{"b":"2","a":"1","c":"3"}
-{"b":"2","a":"1","c":"3"}
-{"b":"2","a":"1","c":"3"}
-PREHOOK: query: explain select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        TableScan
-          alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: str_to_map('a:1,b:2,c:3',',',':') (type: map<string,string>)
-            outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 377000 Basic stats: COMPLETE Column stats: COMPLETE
-            Limit
-              Number of rows: 3
-              Statistics: Num rows: 3 Data size: 2262 Basic stats: COMPLETE Column stats: COMPLETE
-              ListSink
-
-PREHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-{"b":"2","a":"1","c":"3"}
-{"b":"2","a":"1","c":"3"}
-{"b":"2","a":"1","c":"3"}
-PREHOOK: query: explain select str_to_map(t.ss,',',':')['a']
-from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
-limit 3
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select str_to_map(t.ss,',',':')['a']
-from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
-limit 3
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: 'a:1,b:2,c:3' (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
-              Transform Operator
-                command: cat
-                output info:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
-                Select Operator
-                  expressions: str_to_map(_col0,',',':')['a'] (type: string)
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
-                  Limit
-                    Number of rows: 3
-                    Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select str_to_map(t.ss,',',':')['a']
-from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
-limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map(t.ss,',',':')['a']
-from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
-limit 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-1
-1
-1
-PREHOOK: query: drop table tbl_s2m
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table tbl_s2m
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src tablesample (3 rows)
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl_s2m
-POSTHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src tablesample (3 rows)
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl_s2m
-POSTHOOK: Lineage: tbl_s2m.t SIMPLE []
-PREHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl_s2m
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl_s2m
-#### A masked pattern was here ####
-444
-444
-444
-PREHOOK: query: drop table tbl_s2m
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tbl_s2m
-PREHOOK: Output: default@tbl_s2m
-POSTHOOK: query: drop table tbl_s2m
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tbl_s2m
-POSTHOOK: Output: default@tbl_s2m
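The deleted file above is the JDK7-specific expected output for str_to_map; for quick reference, a minimal usage sketch of the function as documented in it (default delimiters ',' and '='):

  select str_to_map('a=1,b=2,c=3')['b'] from src limit 1;           -- returns '2'
  select str_to_map('a:1,b:2,c:3', ',', ':')['c'] from src limit 1; -- explicit delimiters, returns '3'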

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/str_to_map.q.java1.8.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/str_to_map.q.java1.8.out b/ql/src/test/results/clientpositive/str_to_map.q.java1.8.out
deleted file mode 100644
index 23b0cbb..0000000
--- a/ql/src/test/results/clientpositive/str_to_map.q.java1.8.out
+++ /dev/null
@@ -1,219 +0,0 @@
-PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-desc function str_to_map
-PREHOOK: type: DESCFUNCTION
-POSTHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
-
-desc function str_to_map
-POSTHOOK: type: DESCFUNCTION
-str_to_map(text, delimiter1, delimiter2) - Creates a map by parsing text 
-PREHOOK: query: desc function extended str_to_map
-PREHOOK: type: DESCFUNCTION
-POSTHOOK: query: desc function extended str_to_map
-POSTHOOK: type: DESCFUNCTION
-str_to_map(text, delimiter1, delimiter2) - Creates a map by parsing text 
-Split text into key-value pairs using two delimiters. The first delimiter separates pairs, and the second delimiter separates key and value. If only one parameter is given, default delimiters are used: ',' as delimiter1 and '=' as delimiter2.
-PREHOOK: query: explain select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        TableScan
-          alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: str_to_map('a=1,b=2,c=3',',','=')['a'] (type: string)
-            outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
-            Limit
-              Number of rows: 3
-              Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
-              ListSink
-
-PREHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-1
-1
-1
-PREHOOK: query: explain select str_to_map('a:1,b:2,c:3') from src limit 3
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select str_to_map('a:1,b:2,c:3') from src limit 3
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        TableScan
-          alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: str_to_map('a:1,b:2,c:3') (type: map<string,string>)
-            outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 460000 Basic stats: COMPLETE Column stats: COMPLETE
-            Limit
-              Number of rows: 3
-              Statistics: Num rows: 3 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE
-              ListSink
-
-PREHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-{"a":"1","b":"2","c":"3"}
-{"a":"1","b":"2","c":"3"}
-{"a":"1","b":"2","c":"3"}
-PREHOOK: query: explain select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        TableScan
-          alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-          Select Operator
-            expressions: str_to_map('a:1,b:2,c:3',',',':') (type: map<string,string>)
-            outputColumnNames: _col0
-            Statistics: Num rows: 500 Data size: 460000 Basic stats: COMPLETE Column stats: COMPLETE
-            Limit
-              Number of rows: 3
-              Statistics: Num rows: 3 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE
-              ListSink
-
-PREHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-{"a":"1","b":"2","c":"3"}
-{"a":"1","b":"2","c":"3"}
-{"a":"1","b":"2","c":"3"}
-PREHOOK: query: explain select str_to_map(t.ss,',',':')['a']
-from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
-limit 3
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select str_to_map(t.ss,',',':')['a']
-from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
-limit 3
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: 'a:1,b:2,c:3' (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
-              Transform Operator
-                command: cat
-                output info:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
-                Select Operator
-                  expressions: str_to_map(_col0,',',':')['a'] (type: string)
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
-                  Limit
-                    Number of rows: 3
-                    Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: select str_to_map(t.ss,',',':')['a']
-from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
-limit 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map(t.ss,',',':')['a']
-from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
-limit 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-#### A masked pattern was here ####
-1
-1
-1
-PREHOOK: query: drop table tbl_s2m
-PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table tbl_s2m
-POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src tablesample (3 rows)
-PREHOOK: type: CREATETABLE_AS_SELECT
-PREHOOK: Input: default@src
-PREHOOK: Output: database:default
-PREHOOK: Output: default@tbl_s2m
-POSTHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src tablesample (3 rows)
-POSTHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: Input: default@src
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@tbl_s2m
-PREHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
-PREHOOK: type: QUERY
-PREHOOK: Input: default@tbl_s2m
-#### A masked pattern was here ####
-POSTHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@tbl_s2m
-#### A masked pattern was here ####
-444
-444
-444
-PREHOOK: query: drop table tbl_s2m
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@tbl_s2m
-PREHOOK: Output: default@tbl_s2m
-POSTHOOK: query: drop table tbl_s2m
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@tbl_s2m
-POSTHOOK: Output: default@tbl_s2m
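
For reference, a minimal HiveQL sketch of the str_to_map semantics these golden files exercise; the results in the comments are the ones shown in the output above and below. FROM-less SELECT is assumed available (otherwise append "from src limit 1" as the tests themselves do):

-- str_to_map(text, delimiter1, delimiter2): delimiter1 separates pairs,
-- delimiter2 separates key from value; with one argument the defaults
-- ',' and '=' are used.
select str_to_map('a=1,b=2,c=3')['a'];                  -- '1' (default delimiters)
select str_to_map('a:1,b:2,c:3', ',', ':');             -- {"a":"1","b":"2","c":"3"}
select str_to_map('ABC=CC_333=444', '_', '=')['333'];   -- '444'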

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/str_to_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/str_to_map.q.out b/ql/src/test/results/clientpositive/str_to_map.q.out
new file mode 100644
index 0000000..30c98db
--- /dev/null
+++ b/ql/src/test/results/clientpositive/str_to_map.q.out
@@ -0,0 +1,216 @@
+PREHOOK: query: desc function str_to_map
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: desc function str_to_map
+POSTHOOK: type: DESCFUNCTION
+str_to_map(text, delimiter1, delimiter2) - Creates a map by parsing text 
+PREHOOK: query: desc function extended str_to_map
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: desc function extended str_to_map
+POSTHOOK: type: DESCFUNCTION
+str_to_map(text, delimiter1, delimiter2) - Creates a map by parsing text 
+Split text into key-value pairs using two delimiters. The first delimiter separates pairs, and the second delimiter separates key and value. If only one parameter is given, default delimiters are used: ',' as delimiter1 and '=' as delimiter2.
+PREHOOK: query: explain select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: str_to_map('a=1,b=2,c=3',',','=')['a'] (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 42500 Basic stats: COMPLETE Column stats: COMPLETE
+            Limit
+              Number of rows: 3
+              Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
+              ListSink
+
+PREHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1
+1
+1
+PREHOOK: query: explain select str_to_map('a:1,b:2,c:3') from src limit 3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select str_to_map('a:1,b:2,c:3') from src limit 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: str_to_map('a:1,b:2,c:3') (type: map<string,string>)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 377000 Basic stats: COMPLETE Column stats: COMPLETE
+            Limit
+              Number of rows: 3
+              Statistics: Num rows: 3 Data size: 2262 Basic stats: COMPLETE Column stats: COMPLETE
+              ListSink
+
+PREHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"a":"1","b":"2","c":"3"}
+{"a":"1","b":"2","c":"3"}
+{"a":"1","b":"2","c":"3"}
+PREHOOK: query: explain select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: str_to_map('a:1,b:2,c:3',',',':') (type: map<string,string>)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 377000 Basic stats: COMPLETE Column stats: COMPLETE
+            Limit
+              Number of rows: 3
+              Statistics: Num rows: 3 Data size: 2262 Basic stats: COMPLETE Column stats: COMPLETE
+              ListSink
+
+PREHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"a":"1","b":"2","c":"3"}
+{"a":"1","b":"2","c":"3"}
+{"a":"1","b":"2","c":"3"}
+PREHOOK: query: explain select str_to_map(t.ss,',',':')['a']
+from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
+limit 3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select str_to_map(t.ss,',',':')['a']
+from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
+limit 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: 'a:1,b:2,c:3' (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+              Transform Operator
+                command: cat
+                output info:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
+                Select Operator
+                  expressions: str_to_map(_col0,',',':')['a'] (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Limit
+                    Number of rows: 3
+                    Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 3
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select str_to_map(t.ss,',',':')['a']
+from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
+limit 3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select str_to_map(t.ss,',',':')['a']
+from (select transform('a:1,b:2,c:3') using 'cat' as (ss) from src) t
+limit 3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+1
+1
+1
+PREHOOK: query: drop table tbl_s2m
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tbl_s2m
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src tablesample (3 rows)
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tbl_s2m
+POSTHOOK: query: create table tbl_s2m as select 'ABC=CC_333=444' as t from src tablesample (3 rows)
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tbl_s2m
+POSTHOOK: Lineage: tbl_s2m.t SIMPLE []
+PREHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tbl_s2m
+#### A masked pattern was here ####
+POSTHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tbl_s2m
+#### A masked pattern was here ####
+444
+444
+444
+PREHOOK: query: drop table tbl_s2m
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@tbl_s2m
+PREHOOK: Output: default@tbl_s2m
+POSTHOOK: query: drop table tbl_s2m
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@tbl_s2m
+POSTHOOK: Output: default@tbl_s2m
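
The tail of the file above probes the function against a real table; the fixture pattern, restated as a standalone sketch (names as in the test itself; "if exists" added only so the sketch is idempotent):

-- Build a three-row, one-column fixture with CTAS + TABLESAMPLE, query it
-- with non-default delimiters, then drop it.
drop table if exists tbl_s2m;
create table tbl_s2m as select 'ABC=CC_333=444' as t from src tablesample (3 rows);
select str_to_map(t,'_','=')['333'] from tbl_s2m;   -- 444, once per row
drop table tbl_s2m;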

http://git-wip-us.apache.org/repos/asf/hive/blob/9687dcc9/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.7.out b/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.7.out
deleted file mode 100644
index 279843b..0000000
--- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.java1.7.out
+++ /dev/null
@@ -1,999 +0,0 @@
-PREHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_4
-POSTHOOK: query: -- SORT_QUERY_RESULTS
--- JAVA_VERSION_SPECIFIC_OUTPUT
-
-CREATE TABLE src_4(
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_4
-RUN: Stage-0:DDL
-PREHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@src_5
-POSTHOOK: query: CREATE TABLE src_5( 
-  key STRING, 
-  value STRING
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@src_5
-RUN: Stage-0:DDL
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-10 is a root stage
-  Stage-2 depends on stages: Stage-10
-  Stage-3 depends on stages: Stage-2
-  Stage-4 depends on stages: Stage-3
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
-  Stage-6 depends on stages: Stage-2
-  Stage-0 depends on stages: Stage-6
-  Stage-7 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-10
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '2') and key is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: 0 (type: bigint)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), value (type: string)
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Semi Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-3
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-          TableScan
-            alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key > '2') (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string)
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Outer Join0 to 1
-          keys:
-            0 _col0 (type: string)
-            1 _col0 (type: string)
-          outputColumnNames: _col0, _col1, _col5
-          Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: _col5 is null (type: boolean)
-            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: string), _col1 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col0 (type: string)
-              sort order: +
-              Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.src_5
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_5
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
-  Stage: Stage-6
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: key (type: string), value (type: string)
-              sort order: ++
-              Map-reduce partition columns: key (type: string), value (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '9') and value is not null) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Semi Join 0 to 1
-          keys:
-            0 key (type: string), value (type: string)
-            1 _col0 (type: string), _col1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.src_4
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_4
-
-  Stage: Stage-7
-    Stats-Aggr Operator
-
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_4
-PREHOOK: Output: default@src_5
-POSTHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_4
-POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-RUN: Stage-10:MAPRED
-RUN: Stage-2:MAPRED
-RUN: Stage-3:MAPRED
-RUN: Stage-6:MAPRED
-RUN: Stage-4:MAPRED
-RUN: Stage-0:MOVE
-RUN: Stage-1:MOVE
-RUN: Stage-7:STATS
-RUN: Stage-5:STATS
-PREHOOK: query: select * from src_4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_4
-#### A masked pattern was here ####
-90	val_90
-90	val_90
-90	val_90
-92	val_92
-95	val_95
-95	val_95
-96	val_96
-97	val_97
-97	val_97
-98	val_98
-98	val_98
-PREHOOK: query: select * from src_5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_5
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_5
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-105	val_105
-11	val_11
-111	val_111
-113	val_113
-113	val_113
-114	val_114
-116	val_116
-118	val_118
-118	val_118
-119	val_119
-119	val_119
-119	val_119
-12	val_12
-12	val_12
-120	val_120
-120	val_120
-125	val_125
-125	val_125
-126	val_126
-128	val_128
-128	val_128
-128	val_128
-129	val_129
-129	val_129
-131	val_131
-133	val_133
-134	val_134
-134	val_134
-136	val_136
-137	val_137
-137	val_137
-138	val_138
-138	val_138
-138	val_138
-138	val_138
-143	val_143
-145	val_145
-146	val_146
-146	val_146
-149	val_149
-149	val_149
-15	val_15
-15	val_15
-150	val_150
-152	val_152
-152	val_152
-153	val_153
-155	val_155
-156	val_156
-157	val_157
-158	val_158
-160	val_160
-162	val_162
-163	val_163
-164	val_164
-164	val_164
-165	val_165
-165	val_165
-166	val_166
-167	val_167
-167	val_167
-167	val_167
-168	val_168
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-17	val_17
-170	val_170
-172	val_172
-172	val_172
-174	val_174
-174	val_174
-175	val_175
-175	val_175
-176	val_176
-176	val_176
-177	val_177
-178	val_178
-179	val_179
-179	val_179
-18	val_18
-18	val_18
-180	val_180
-181	val_181
-183	val_183
-186	val_186
-187	val_187
-187	val_187
-187	val_187
-189	val_189
-19	val_19
-190	val_190
-191	val_191
-191	val_191
-192	val_192
-193	val_193
-193	val_193
-193	val_193
-194	val_194
-195	val_195
-195	val_195
-196	val_196
-197	val_197
-197	val_197
-199	val_199
-199	val_199
-199	val_199
-2	val_2
-Warning: Map Join MAPJOIN[55][bigTable=b] in task 'Stage-13:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-POSTHOOK: query: explain
-from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-10 is a root stage
-  Stage-14 depends on stages: Stage-10 , consists of Stage-17, Stage-2
-  Stage-17 has a backup stage: Stage-2
-  Stage-13 depends on stages: Stage-17
-  Stage-15 depends on stages: Stage-2, Stage-13
-  Stage-12 depends on stages: Stage-15
-  Stage-0 depends on stages: Stage-12
-  Stage-7 depends on stages: Stage-0
-  Stage-16 depends on stages: Stage-2, Stage-13
-  Stage-4 depends on stages: Stage-16
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
-  Stage-2
-
-STAGE PLANS:
-  Stage: Stage-10
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '2') and key is null) (type: boolean)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  mode: hash
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: bigint)
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-          Filter Operator
-            predicate: (_col0 = 0) (type: boolean)
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                keys: 0 (type: bigint)
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-14
-    Conditional Operator
-
-  Stage: Stage-17
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        $INTNAME 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        $INTNAME 
-          TableScan
-            HashTable Sink Operator
-              keys:
-                0 
-                1 
-
-  Stage: Stage-13
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Map Join Operator
-              condition map:
-                   Left Semi Join 0 to 1
-              keys:
-                0 
-                1 
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-15
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        sq_1:a 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        sq_1:a 
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: ((key > '9') and value is not null) (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  HashTable Sink Operator
-                    keys:
-                      0 key (type: string), value (type: string)
-                      1 _col0 (type: string), _col1 (type: string)
-
-  Stage: Stage-12
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Map Join Operator
-              condition map:
-                   Left Semi Join 0 to 1
-              keys:
-                0 key (type: string), value (type: string)
-                1 _col0 (type: string), _col1 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.src_4
-      Local Work:
-        Map Reduce Local Work
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_4
-
-  Stage: Stage-7
-    Stats-Aggr Operator
-
-  Stage: Stage-16
-    Map Reduce Local Work
-      Alias -> Map Local Tables:
-        sq_2:s1 
-          Fetch Operator
-            limit: -1
-      Alias -> Map Local Operator Tree:
-        sq_2:s1 
-          TableScan
-            alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Filter Operator
-              predicate: (key > '2') (type: boolean)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                HashTable Sink Operator
-                  keys:
-                    0 _col0 (type: string)
-                    1 _col0 (type: string)
-
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Map Join Operator
-              condition map:
-                   Left Outer Join0 to 1
-              keys:
-                0 _col0 (type: string)
-                1 _col0 (type: string)
-              outputColumnNames: _col0, _col1, _col5
-              Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: _col5 is null (type: boolean)
-                Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: string)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: string)
-      Local Work:
-        Map Reduce Local Work
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.src_5
-
-  Stage: Stage-1
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src_5
-
-  Stage: Stage-5
-    Stats-Aggr Operator
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: b
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: key (type: string), value (type: string)
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-          TableScan
-            Reduce Output Operator
-              sort order: 
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-      Reduce Operator Tree:
-        Join Operator
-          condition map:
-               Left Semi Join 0 to 1
-          keys:
-            0 
-            1 
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-Warning: Map Join MAPJOIN[55][bigTable=b] in task 'Stage-13:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[31][tables = [b, sq_2_notin_nullcheck]] in Stage 'Stage-2:MAPRED' is a cross product
-PREHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@src_4
-PREHOOK: Output: default@src_5
-POSTHOOK: query: from src b 
-INSERT OVERWRITE TABLE src_4 
-  select * 
-  where b.key in 
-   (select a.key 
-    from src a 
-    where b.value = a.value and a.key > '9'
-   ) 
-INSERT OVERWRITE TABLE src_5 
-  select *  
-  where b.key not in  ( select key from src s1 where s1.key > '2') 
-  order by key
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@src_4
-POSTHOOK: Output: default@src_5
-POSTHOOK: Lineage: src_4.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_4.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.key SIMPLE [(src)b.FieldSchema(name:key, type:string, comment:default), ]
-POSTHOOK: Lineage: src_5.value SIMPLE [(src)b.FieldSchema(name:value, type:string, comment:default), ]
-RUN: Stage-10:MAPRED
-RUN: Stage-14:CONDITIONAL
-RUN: Stage-17:MAPREDLOCAL
-RUN: Stage-13:MAPRED
-RUN: Stage-15:MAPREDLOCAL
-RUN: Stage-16:MAPREDLOCAL
-RUN: Stage-12:MAPRED
-RUN: Stage-4:MAPRED
-RUN: Stage-0:MOVE
-RUN: Stage-1:MOVE
-RUN: Stage-7:STATS
-RUN: Stage-5:STATS
-PREHOOK: query: select * from src_4
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_4
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_4
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_4
-#### A masked pattern was here ####
-90	val_90
-90	val_90
-90	val_90
-92	val_92
-95	val_95
-95	val_95
-96	val_96
-97	val_97
-97	val_97
-98	val_98
-98	val_98
-PREHOOK: query: select * from src_5
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src_5
-#### A masked pattern was here ####
-POSTHOOK: query: select * from src_5
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src_5
-#### A masked pattern was here ####
-0	val_0
-0	val_0
-0	val_0
-10	val_10
-100	val_100
-100	val_100
-103	val_103
-103	val_103
-104	val_104
-104	val_104
-105	val_105
-11	val_11
-111	val_111
-113	val_113
-113	val_113
-114	val_114
-116	val_116
-118	val_118
-118	val_118
-119	val_119
-119	val_119
-119	val_119
-12	val_12
-12	val_12
-120	val_120
-120	val_120
-125	val_125
-125	val_125
-126	val_126
-128	val_128
-128	val_128
-128	val_128
-129	val_129
-129	val_129
-131	val_131
-133	val_133
-134	val_134
-134	val_134
-136	val_136
-137	val_137
-137	val_137
-138	val_138
-138	val_138
-138	val_138
-138	val_138
-143	val_143
-145	val_145
-146	val_146
-146	val_146
-149	val_149
-149	val_149
-15	val_15
-15	val_15
-150	val_150
-152	val_152
-152	val_152
-153	val_153
-155	val_155
-156	val_156
-157	val_157
-158	val_158
-160	val_160
-162	val_162
-163	val_163
-164	val_164
-164	val_164
-165	val_165
-165	val_165
-166	val_166
-167	val_167
-167	val_167
-167	val_167
-168	val_168
-169	val_169
-169	val_169
-169	val_169
-169	val_169
-17	val_17
-170	val_170
-172	val_172
-172	val_172
-174	val_174
-174	val_174
-175	val_175
-175	val_175
-176	val_176
-176	val_176
-177	val_177
-178	val_178
-179	val_179
-179	val_179
-18	val_18
-18	val_18
-180	val_180
-181	val_181
-183	val_183
-186	val_186
-187	val_187
-187	val_187
-187	val_187
-189	val_189
-19	val_19
-190	val_190
-191	val_191
-191	val_191
-192	val_192
-193	val_193
-193	val_193
-193	val_193
-194	val_194
-195	val_195
-195	val_195
-196	val_196
-197	val_197
-197	val_197
-199	val_199
-199	val_199
-199	val_199
-2	val_2
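
The deleted plans above show how Hive executes the NOT IN branch of the multi-insert: Stage-10 builds the sq_2_notin_nullcheck guard, and that guard is then joined back to src with an empty key list, which is exactly what the cross-product warnings refer to. A hedged sketch of the guard's semantics:

-- NOT IN follows three-valued logic: if the subquery can produce a NULL key,
-- "b.key not in (...)" is never true and the branch must return no rows.
-- Stage-10 materializes that check (compare its predicate
-- "((key > '2') and key is null)" and the "(_col0 = 0)" filter):
select count(*) from src s1 where s1.key > '2' and s1.key is null;  -- must be 0
-- The one-row guard carries no join key (note the empty "keys: 0 / 1" in the
-- Join Operator), so attaching it to every row of src is necessarily a
-- cross product; hence the planner warnings.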


[10/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
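
The query66 hunks below show the user-visible effect of the change: in the old plan the literals 'DIAMOND,AIRBORNE' and 2002 had been constant-folded into the Group By keys, while the new plan groups only by the six real keys and appears to re-attach the constants in the post-aggregation Select (SEL_72 grows from 42 to 44 output columns). A reduced, hypothetical reproducer of that plan shape; table t(k, v) is illustrative and not part of the test suite:

-- Constant grouping columns introduced through subquery aliases, the way
-- query66's ship_carriers and year columns become constants once the
-- sm_carrier IN ('DIAMOND', 'AIRBORNE') and d_year = 2002 filters apply.
select w, carriers, yr, sum(v) as total
from (select k as w, 'DIAMOND,AIRBORNE' as carriers, 2002 as yr, v
      from t) x                       -- t(k, v) is a hypothetical table
group by w, carriers, yr;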
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query66.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query66.q.out b/ql/src/test/results/clientpositive/perf/query66.q.out
index b19cc77..5eef7e7 100644
--- a/ql/src/test/results/clientpositive/perf/query66.q.out
+++ b/ql/src/test/results/clientpositive/perf/query66.q.out
@@ -465,168 +465,164 @@ Stage-0
           <-Reducer 8 [SIMPLE_EDGE]
             SHUFFLE [RS_73]
               Select Operator [SEL_72] (rows=158120068 width=135)
-                Output:["_col0","_col1","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col2","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col3","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col4","_col40","_col41","_col42","_col43","_col5","_col8","_col9"]
+                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
                 Group By Operator [GBY_71] (rows=158120068 width=135)
-                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)","sum(VALUE._col24)","sum(VALUE._col25)","sum(VALUE._col26)","sum(VALUE._col27)","sum(VALUE._col28)","sum(VALUE._col29)","sum(VALUE._col30)","sum(VALUE._col31)","sum(VALUE._col32)","sum(VALUE._col33)","sum(VALUE._col34)","sum(VALUE._col35)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, 'DIAMOND,AIRBORNE', 2002
+                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)","sum(VALUE._col24)","sum(VALUE._col25)","sum(VALUE._col26)","sum(VALUE._col27)","sum(VALUE._col28)","sum(VALUE._col29)","sum(VALUE._col30)","sum(VALUE._col31)","sum(VALUE._col32)","sum(VALUE._col33)","sum(VALUE._col34)","sum(VALUE._col35)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
                 <-Union 7 [SIMPLE_EDGE]
                   <-Reducer 19 [CONTAINS]
                     Reduce Output Operator [RS_70]
-                      PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 'DIAMOND,AIRBORNE', 2002
+                      PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
                       Group By Operator [GBY_69] (rows=316240137 width=135)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)","sum(_col42)","sum(_col43)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 'DIAMOND,AIRBORNE', 2002
+                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
                         Select Operator [SEL_67] (rows=316240137 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
-                          Select Operator [SEL_65] (rows=210822976 width=135)
-                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
-                            Group By Operator [GBY_64] (rows=210822976 width=135)
-                              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, 2002
-                            <-Reducer 18 [SIMPLE_EDGE]
-                              SHUFFLE [RS_63]
-                                PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 2002
-                                Group By Operator [GBY_62] (rows=421645953 width=135)
-                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 2002
-                                  Select Operator [SEL_60] (rows=421645953 width=135)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"]
-                                    Merge Join Operator [MERGEJOIN_122] (rows=421645953 width=135)
-                                      Conds:RS_57._col3=RS_58._col0(Inner),Output:["_col4","_col5","_col6","_col11","_col15","_col16","_col17","_col18","_col19","_col20"]
-                                    <-Map 23 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_58]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_47] (rows=27 width=1029)
-                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                          Filter Operator [FIL_114] (rows=27 width=1029)
-                                            predicate:w_warehouse_sk is not null
-                                            TableScan [TS_45] (rows=27 width=1029)
-                                              default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
-                                    <-Reducer 17 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_57]
-                                        PartitionCols:_col3
-                                        Merge Join Operator [MERGEJOIN_121] (rows=383314495 width=135)
-                                          Conds:RS_54._col2=RS_55._col0(Inner),Output:["_col3","_col4","_col5","_col6","_col11"]
-                                        <-Map 22 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_55]
-                                            PartitionCols:_col0
-                                            Select Operator [SEL_44] (rows=1 width=0)
-                                              Output:["_col0"]
-                                              Filter Operator [FIL_113] (rows=1 width=0)
-                                                predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
-                                                TableScan [TS_42] (rows=1 width=0)
-                                                  default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
-                                        <-Reducer 16 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_54]
-                                            PartitionCols:_col2
-                                            Merge Join Operator [MERGEJOIN_120] (rows=348467716 width=135)
-                                              Conds:RS_51._col0=RS_52._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col11"]
-                                            <-Map 21 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_52]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_41] (rows=36524 width=1119)
-                                                  Output:["_col0","_col2"]
-                                                  Filter Operator [FIL_112] (rows=36524 width=1119)
-                                                    predicate:((d_year = 2002) and d_date_sk is not null)
-                                                    TableScan [TS_39] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                            <-Reducer 15 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_51]
-                                                PartitionCols:_col0
-                                                Merge Join Operator [MERGEJOIN_119] (rows=316788826 width=135)
-                                                  Conds:RS_48._col1=RS_49._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6"]
-                                                <-Map 14 [SIMPLE_EDGE]
-                                                  SHUFFLE [RS_48]
-                                                    PartitionCols:_col1
-                                                    Select Operator [SEL_35] (rows=287989836 width=135)
-                                                      Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                      Filter Operator [FIL_110] (rows=287989836 width=135)
-                                                        predicate:(cs_warehouse_sk is not null and cs_sold_date_sk is not null and cs_sold_time_sk is not null and cs_ship_mode_sk is not null)
-                                                        TableScan [TS_33] (rows=287989836 width=135)
-                                                          default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_ship_mode_sk","cs_warehouse_sk","cs_quantity","cs_ext_sales_price","cs_net_paid_inc_ship_tax"]
-                                                <-Map 20 [SIMPLE_EDGE]
-                                                  SHUFFLE [RS_49]
-                                                    PartitionCols:_col0
-                                                    Select Operator [SEL_38] (rows=43200 width=471)
-                                                      Output:["_col0"]
-                                                      Filter Operator [FIL_111] (rows=43200 width=471)
-                                                        predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
-                                                        TableScan [TS_36] (rows=86400 width=471)
-                                                          default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"]
+                          Group By Operator [GBY_64] (rows=210822976 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
+                          <-Reducer 18 [SIMPLE_EDGE]
+                            SHUFFLE [RS_63]
+                              PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
+                              Group By Operator [GBY_62] (rows=421645953 width=135)
+                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
+                                Select Operator [SEL_60] (rows=421645953 width=135)
+                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
+                                  Merge Join Operator [MERGEJOIN_122] (rows=421645953 width=135)
+                                    Conds:RS_57._col3=RS_58._col0(Inner),Output:["_col4","_col5","_col6","_col11","_col15","_col16","_col17","_col18","_col19","_col20"]
+                                  <-Map 23 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_58]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_47] (rows=27 width=1029)
+                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                        Filter Operator [FIL_114] (rows=27 width=1029)
+                                          predicate:w_warehouse_sk is not null
+                                          TableScan [TS_45] (rows=27 width=1029)
+                                            default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
+                                  <-Reducer 17 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_57]
+                                      PartitionCols:_col3
+                                      Merge Join Operator [MERGEJOIN_121] (rows=383314495 width=135)
+                                        Conds:RS_54._col2=RS_55._col0(Inner),Output:["_col3","_col4","_col5","_col6","_col11"]
+                                      <-Map 22 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_55]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_44] (rows=1 width=0)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_113] (rows=1 width=0)
+                                              predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
+                                              TableScan [TS_42] (rows=1 width=0)
+                                                default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
+                                      <-Reducer 16 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_54]
+                                          PartitionCols:_col2
+                                          Merge Join Operator [MERGEJOIN_120] (rows=348467716 width=135)
+                                            Conds:RS_51._col0=RS_52._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col11"]
+                                          <-Map 21 [SIMPLE_EDGE]
+                                            SHUFFLE [RS_52]
+                                              PartitionCols:_col0
+                                              Select Operator [SEL_41] (rows=36524 width=1119)
+                                                Output:["_col0","_col2"]
+                                                Filter Operator [FIL_112] (rows=36524 width=1119)
+                                                  predicate:((d_year = 2002) and d_date_sk is not null)
+                                                  TableScan [TS_39] (rows=73049 width=1119)
+                                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                          <-Reducer 15 [SIMPLE_EDGE]
+                                            SHUFFLE [RS_51]
+                                              PartitionCols:_col0
+                                              Merge Join Operator [MERGEJOIN_119] (rows=316788826 width=135)
+                                                Conds:RS_48._col1=RS_49._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6"]
+                                              <-Map 14 [SIMPLE_EDGE]
+                                                SHUFFLE [RS_48]
+                                                  PartitionCols:_col1
+                                                  Select Operator [SEL_35] (rows=287989836 width=135)
+                                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                    Filter Operator [FIL_110] (rows=287989836 width=135)
+                                                      predicate:(cs_warehouse_sk is not null and cs_sold_date_sk is not null and cs_sold_time_sk is not null and cs_ship_mode_sk is not null)
+                                                      TableScan [TS_33] (rows=287989836 width=135)
+                                                        default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_ship_mode_sk","cs_warehouse_sk","cs_quantity","cs_ext_sales_price","cs_net_paid_inc_ship_tax"]
+                                              <-Map 20 [SIMPLE_EDGE]
+                                                SHUFFLE [RS_49]
+                                                  PartitionCols:_col0
+                                                  Select Operator [SEL_38] (rows=43200 width=471)
+                                                    Output:["_col0"]
+                                                    Filter Operator [FIL_111] (rows=43200 width=471)
+                                                      predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
+                                                      TableScan [TS_36] (rows=86400 width=471)
+                                                        default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
                   <-Reducer 6 [CONTAINS]
                     Reduce Output Operator [RS_70]
-                      PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 'DIAMOND,AIRBORNE', 2002
+                      PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
                       Group By Operator [GBY_69] (rows=316240137 width=135)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"],aggregations:["sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)","sum(_col42)","sum(_col43)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 'DIAMOND,AIRBORNE', 2002
+                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)","sum(_col31)","sum(_col32)","sum(_col33)","sum(_col34)","sum(_col35)","sum(_col36)","sum(_col37)","sum(_col38)","sum(_col39)","sum(_col40)","sum(_col41)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
                         Select Operator [SEL_67] (rows=316240137 width=135)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41","_col42","_col43"]
-                          Select Operator [SEL_32] (rows=105417161 width=135)
-                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
-                            Group By Operator [GBY_31] (rows=105417161 width=135)
-                              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, 2002
-                            <-Reducer 5 [SIMPLE_EDGE]
-                              SHUFFLE [RS_30]
-                                PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, 2002
-                                Group By Operator [GBY_29] (rows=210834322 width=135)
-                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"],aggregations:["sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)","sum(_col30)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, 2002
-                                  Select Operator [SEL_27] (rows=210834322 width=135)
-                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30"]
-                                    Merge Join Operator [MERGEJOIN_118] (rows=210834322 width=135)
-                                      Conds:RS_24._col3=RS_25._col0(Inner),Output:["_col4","_col5","_col6","_col11","_col15","_col16","_col17","_col18","_col19","_col20"]
-                                    <-Map 13 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_25]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_14] (rows=27 width=1029)
-                                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                          Filter Operator [FIL_109] (rows=27 width=1029)
-                                            predicate:w_warehouse_sk is not null
-                                            TableScan [TS_12] (rows=27 width=1029)
-                                              default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
-                                    <-Reducer 4 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_24]
-                                        PartitionCols:_col3
-                                        Merge Join Operator [MERGEJOIN_117] (rows=191667562 width=135)
-                                          Conds:RS_21._col2=RS_22._col0(Inner),Output:["_col3","_col4","_col5","_col6","_col11"]
-                                        <-Map 12 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_22]
-                                            PartitionCols:_col0
-                                            Select Operator [SEL_11] (rows=1 width=0)
-                                              Output:["_col0"]
-                                              Filter Operator [FIL_108] (rows=1 width=0)
-                                                predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
-                                                TableScan [TS_9] (rows=1 width=0)
-                                                  default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
-                                        <-Reducer 3 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_21]
-                                            PartitionCols:_col2
-                                            Merge Join Operator [MERGEJOIN_116] (rows=174243235 width=135)
-                                              Conds:RS_18._col0=RS_19._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col11"]
-                                            <-Map 11 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_19]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_8] (rows=36524 width=1119)
-                                                  Output:["_col0","_col2"]
-                                                  Filter Operator [FIL_107] (rows=36524 width=1119)
-                                                    predicate:((d_year = 2002) and d_date_sk is not null)
-                                                    TableScan [TS_6] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                                            <-Reducer 2 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_18]
-                                                PartitionCols:_col0
-                                                Merge Join Operator [MERGEJOIN_115] (rows=158402938 width=135)
-                                                  Conds:RS_15._col1=RS_16._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6"]
-                                                <-Map 1 [SIMPLE_EDGE]
-                                                  SHUFFLE [RS_15]
-                                                    PartitionCols:_col1
-                                                    Select Operator [SEL_2] (rows=144002668 width=135)
-                                                      Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                      Filter Operator [FIL_105] (rows=144002668 width=135)
-                                                        predicate:(ws_warehouse_sk is not null and ws_sold_date_sk is not null and ws_sold_time_sk is not null and ws_ship_mode_sk is not null)
-                                                        TableScan [TS_0] (rows=144002668 width=135)
-                                                          default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_quantity","ws_sales_price","ws_net_paid_inc_tax"]
-                                                <-Map 10 [SIMPLE_EDGE]
-                                                  SHUFFLE [RS_16]
-                                                    PartitionCols:_col0
-                                                    Select Operator [SEL_5] (rows=43200 width=471)
-                                                      Output:["_col0"]
-                                                      Filter Operator [FIL_106] (rows=43200 width=471)
-                                                        predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
-                                                        TableScan [TS_3] (rows=86400 width=471)
-                                                          default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
+                          Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29","_col30","_col31","_col32","_col33","_col34","_col35","_col36","_col37","_col38","_col39","_col40","_col41"]
+                          Group By Operator [GBY_31] (rows=105417161 width=135)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","sum(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)","sum(VALUE._col12)","sum(VALUE._col13)","sum(VALUE._col14)","sum(VALUE._col15)","sum(VALUE._col16)","sum(VALUE._col17)","sum(VALUE._col18)","sum(VALUE._col19)","sum(VALUE._col20)","sum(VALUE._col21)","sum(VALUE._col22)","sum(VALUE._col23)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5
+                          <-Reducer 5 [SIMPLE_EDGE]
+                            SHUFFLE [RS_30]
+                              PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5
+                              Group By Operator [GBY_29] (rows=210834322 width=135)
+                                Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)","sum(_col9)","sum(_col10)","sum(_col11)","sum(_col12)","sum(_col13)","sum(_col14)","sum(_col15)","sum(_col16)","sum(_col17)","sum(_col18)","sum(_col19)","sum(_col20)","sum(_col21)","sum(_col22)","sum(_col23)","sum(_col24)","sum(_col25)","sum(_col26)","sum(_col27)","sum(_col28)","sum(_col29)"],keys:_col0, _col1, _col2, _col3, _col4, _col5
+                                Select Operator [SEL_27] (rows=210834322 width=135)
+                                  Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20","_col21","_col22","_col23","_col24","_col25","_col26","_col27","_col28","_col29"]
+                                  Merge Join Operator [MERGEJOIN_118] (rows=210834322 width=135)
+                                    Conds:RS_24._col3=RS_25._col0(Inner),Output:["_col4","_col5","_col6","_col11","_col15","_col16","_col17","_col18","_col19","_col20"]
+                                  <-Map 13 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_25]
+                                      PartitionCols:_col0
+                                      Select Operator [SEL_14] (rows=27 width=1029)
+                                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                        Filter Operator [FIL_109] (rows=27 width=1029)
+                                          predicate:w_warehouse_sk is not null
+                                          TableScan [TS_12] (rows=27 width=1029)
+                                            default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name","w_warehouse_sq_ft","w_city","w_county","w_state","w_country"]
+                                  <-Reducer 4 [SIMPLE_EDGE]
+                                    SHUFFLE [RS_24]
+                                      PartitionCols:_col3
+                                      Merge Join Operator [MERGEJOIN_117] (rows=191667562 width=135)
+                                        Conds:RS_21._col2=RS_22._col0(Inner),Output:["_col3","_col4","_col5","_col6","_col11"]
+                                      <-Map 12 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_22]
+                                          PartitionCols:_col0
+                                          Select Operator [SEL_11] (rows=1 width=0)
+                                            Output:["_col0"]
+                                            Filter Operator [FIL_108] (rows=1 width=0)
+                                              predicate:((sm_carrier) IN ('DIAMOND', 'AIRBORNE') and sm_ship_mode_sk is not null)
+                                              TableScan [TS_9] (rows=1 width=0)
+                                                default@ship_mode,ship_mode,Tbl:PARTIAL,Col:NONE,Output:["sm_ship_mode_sk","sm_carrier"]
+                                      <-Reducer 3 [SIMPLE_EDGE]
+                                        SHUFFLE [RS_21]
+                                          PartitionCols:_col2
+                                          Merge Join Operator [MERGEJOIN_116] (rows=174243235 width=135)
+                                            Conds:RS_18._col0=RS_19._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6","_col11"]
+                                          <-Map 11 [SIMPLE_EDGE]
+                                            SHUFFLE [RS_19]
+                                              PartitionCols:_col0
+                                              Select Operator [SEL_8] (rows=36524 width=1119)
+                                                Output:["_col0","_col2"]
+                                                Filter Operator [FIL_107] (rows=36524 width=1119)
+                                                  predicate:((d_year = 2002) and d_date_sk is not null)
+                                                  TableScan [TS_6] (rows=73049 width=1119)
+                                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
+                                          <-Reducer 2 [SIMPLE_EDGE]
+                                            SHUFFLE [RS_18]
+                                              PartitionCols:_col0
+                                              Merge Join Operator [MERGEJOIN_115] (rows=158402938 width=135)
+                                                Conds:RS_15._col1=RS_16._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6"]
+                                              <-Map 1 [SIMPLE_EDGE]
+                                                SHUFFLE [RS_15]
+                                                  PartitionCols:_col1
+                                                  Select Operator [SEL_2] (rows=144002668 width=135)
+                                                    Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                    Filter Operator [FIL_105] (rows=144002668 width=135)
+                                                      predicate:(ws_warehouse_sk is not null and ws_sold_date_sk is not null and ws_sold_time_sk is not null and ws_ship_mode_sk is not null)
+                                                      TableScan [TS_0] (rows=144002668 width=135)
+                                                        default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_quantity","ws_sales_price","ws_net_paid_inc_tax"]
+                                              <-Map 10 [SIMPLE_EDGE]
+                                                SHUFFLE [RS_16]
+                                                  PartitionCols:_col0
+                                                  Select Operator [SEL_5] (rows=43200 width=471)
+                                                    Output:["_col0"]
+                                                    Filter Operator [FIL_106] (rows=43200 width=471)
+                                                      predicate:(t_time BETWEEN 49530 AND 78330 and t_time_sk is not null)
+                                                      TableScan [TS_3] (rows=86400 width=471)
+                                                        default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_time"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query7.q.out b/ql/src/test/results/clientpositive/perf/query7.q.out
index 749b361..a872c84 100644
--- a/ql/src/test/results/clientpositive/perf/query7.q.out
+++ b/ql/src/test/results/clientpositive/perf/query7.q.out
@@ -38,11 +38,11 @@ Stage-0
                       <-Map 11 [SIMPLE_EDGE]
                         SHUFFLE [RS_25]
                           PartitionCols:_col0
-                          Select Operator [SEL_14] (rows=3106 width=13)
+                          Select Operator [SEL_14] (rows=132 width=304)
                             Output:["_col0"]
-                            Filter Operator [FIL_54] (rows=3106 width=13)
+                            Filter Operator [FIL_54] (rows=132 width=304)
                               predicate:((cd_gender = 'F') and (cd_marital_status = 'W') and (cd_education_status = 'Primary') and cd_demo_sk is not null)
-                              TableScan [TS_12] (rows=24850 width=13)
+                              TableScan [TS_12] (rows=1062 width=304)
                                 default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
                       <-Reducer 4 [SIMPLE_EDGE]
                         SHUFFLE [RS_24]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query72.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query72.q.out b/ql/src/test/results/clientpositive/perf/query72.q.out
index 8bf7838..718129e 100644
--- a/ql/src/test/results/clientpositive/perf/query72.q.out
+++ b/ql/src/test/results/clientpositive/perf/query72.q.out
@@ -76,11 +76,11 @@ Stage-0
                                     <-Map 21 [SIMPLE_EDGE]
                                       SHUFFLE [RS_51]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_31] (rows=32306 width=5)
+                                        Select Operator [SEL_31] (rows=1553 width=104)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_131] (rows=32306 width=5)
+                                          Filter Operator [FIL_131] (rows=1553 width=104)
                                             predicate:((cd_marital_status = 'M') and cd_demo_sk is not null)
-                                            TableScan [TS_29] (rows=64612 width=5)
+                                            TableScan [TS_29] (rows=3106 width=104)
                                               default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status"]
                                     <-Reducer 8 [SIMPLE_EDGE]
                                       SHUFFLE [RS_50]


[31/66] [abbrv] hive git commit: HIVE-13332 : support dumping all row indexes in ORC FileDump (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by sp...@apache.org.
HIVE-13332 : support dumping all row indexes in ORC FileDump (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ebc5e6a7
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ebc5e6a7
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ebc5e6a7

Branch: refs/heads/java8
Commit: ebc5e6a7bf5d9add958339e773f858833fbad4ab
Parents: f40f8bc
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed May 25 10:24:33 2016 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Wed May 25 10:24:54 2016 -0700

----------------------------------------------------------------------
 orc/src/java/org/apache/orc/tools/FileDump.java |  24 +-
 .../java/org/apache/orc/tools/JsonFileDump.java |  10 +-
 .../hive/ql/hooks/PostExecOrcFileDump.java      |   2 +-
 .../results/clientpositive/orc_file_dump.q.out  | 231 +++++++++++++++++++
 .../results/clientpositive/orc_merge10.q.out    |  16 ++
 .../results/clientpositive/orc_merge11.q.out    | 120 ++++++++++
 .../results/clientpositive/orc_merge12.q.out    | 216 +++++++++++++++++
 .../clientpositive/tez/orc_merge10.q.out        |  24 ++
 .../clientpositive/tez/orc_merge11.q.out        | 120 ++++++++++
 .../clientpositive/tez/orc_merge12.q.out        | 216 +++++++++++++++++
 10 files changed, 970 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
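
In short: the -r/--rowindex option now accepts "*" to mean every column. A
minimal sketch of exercising the new wildcard programmatically, mirroring the
call the PostExecOrcFileDump hook makes after this patch (the ORC file path
below is a placeholder):

    import org.apache.orc.tools.FileDump;

    public class DumpAllRowIndexes {
      public static void main(String[] args) throws Exception {
        // Dumps file metadata plus row group indexes and bloom filters
        // for every column of the given ORC file.
        FileDump.main(new String[]{"/tmp/example.orc", "--rowindex=*"});
      }
    }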


http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/orc/src/java/org/apache/orc/tools/FileDump.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/tools/FileDump.java b/orc/src/java/org/apache/orc/tools/FileDump.java
index e32027f..1a1d8ab 100644
--- a/orc/src/java/org/apache/orc/tools/FileDump.java
+++ b/orc/src/java/org/apache/orc/tools/FileDump.java
@@ -96,7 +96,7 @@ public final class FileDump {
   public static void main(String[] args) throws Exception {
     Configuration conf = new Configuration();
 
-    List<Integer> rowIndexCols = null;
+    List<Integer> rowIndexCols = new ArrayList<Integer>(0);
     Options opts = createOptions();
     CommandLine cli = new GnuParser().parse(opts, args);
 
@@ -115,10 +115,15 @@ public final class FileDump {
     }
 
     if (cli.hasOption("r")) {
-      String[] colStrs = cli.getOptionValue("r").split(",");
-      rowIndexCols = new ArrayList<Integer>(colStrs.length);
-      for (String colStr : colStrs) {
-        rowIndexCols.add(Integer.parseInt(colStr));
+      String val = cli.getOptionValue("r");
+      if (val != null && val.trim().equals("*")) {
+        rowIndexCols = null; // All the columns
+      } else {
+        String[] colStrs = cli.getOptionValue("r").split(",");
+        rowIndexCols = new ArrayList<Integer>(colStrs.length);
+        for (String colStr : colStrs) {
+          rowIndexCols.add(Integer.parseInt(colStr));
+        }
       }
     }
 
@@ -317,7 +322,7 @@ public final class FileDump {
   }
 
   private static void printMetaDataImpl(final String filename,
-      final Configuration conf, final List<Integer> rowIndexCols, final boolean printTimeZone,
+      final Configuration conf, List<Integer> rowIndexCols, final boolean printTimeZone,
       final List<String> corruptFiles) throws IOException {
     Path file = new Path(filename);
     Reader reader = getReader(file, conf, corruptFiles);
@@ -348,6 +353,12 @@ public final class FileDump {
     }
     ColumnStatistics[] stats = reader.getStatistics();
     int colCount = stats.length;
+    if (rowIndexCols == null) {
+      rowIndexCols = new ArrayList<>(colCount);
+      for (int i = 0; i < colCount; ++i) {
+        rowIndexCols.add(i);
+      }
+    }
     System.out.println("\nFile Statistics:");
     for (int i = 0; i < stats.length; ++i) {
       System.out.println("  Column " + i + ": " + stats[i].toString());
@@ -712,6 +723,7 @@ public final class FileDump {
     return paddedBytes;
   }
 
+  @SuppressWarnings("static-access")
   static Options createOptions() {
     Options result = new Options();
 

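For readers skimming the diff above, a condensed, self-contained sketch of the
new option handling: "*" parses to a null list (the "all columns" sentinel),
which is later expanded to an explicit 0..colCount-1 list once the reader
reports the column count. The class and method names here are illustrative
only, not part of the patch:

    import java.util.ArrayList;
    import java.util.List;

    public class RowIndexColsSketch {
      // Parse the -r/--rowindex value; "*" becomes the null sentinel.
      static List<Integer> parseRowIndexCols(String val) {
        if (val != null && val.trim().equals("*")) {
          return null; // all columns
        }
        String[] colStrs = val.split(",");
        List<Integer> rowIndexCols = new ArrayList<Integer>(colStrs.length);
        for (String colStr : colStrs) {
          rowIndexCols.add(Integer.parseInt(colStr));
        }
        return rowIndexCols;
      }

      // Expand the null sentinel to an explicit list of all column ids.
      static List<Integer> expandToAllCols(List<Integer> rowIndexCols, int colCount) {
        if (rowIndexCols != null) {
          return rowIndexCols;
        }
        List<Integer> all = new ArrayList<>(colCount);
        for (int i = 0; i < colCount; ++i) {
          all.add(i);
        }
        return all;
      }

      public static void main(String[] args) {
        System.out.println(expandToAllCols(parseRowIndexCols("*"), 12));   // all 12 column ids: [0, 1, ..., 11]
        System.out.println(expandToAllCols(parseRowIndexCols("1,3"), 12)); // [1, 3]
      }
    }
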
http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/orc/src/java/org/apache/orc/tools/JsonFileDump.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/tools/JsonFileDump.java b/orc/src/java/org/apache/orc/tools/JsonFileDump.java
index 75153a2..e2048ea 100644
--- a/orc/src/java/org/apache/orc/tools/JsonFileDump.java
+++ b/orc/src/java/org/apache/orc/tools/JsonFileDump.java
@@ -18,6 +18,7 @@
 package org.apache.orc.tools;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 
@@ -114,6 +115,12 @@ public class JsonFileDump {
 
         ColumnStatistics[] stats = reader.getStatistics();
         int colCount = stats.length;
+        if (rowIndexCols == null) {
+          rowIndexCols = new ArrayList<>(colCount);
+          for (int i = 0; i < colCount; ++i) {
+            rowIndexCols.add(i);
+          }
+        }
         writer.key("fileStatistics").array();
         for (int i = 0; i < stats.length; ++i) {
           writer.object();
@@ -165,8 +172,7 @@ public class JsonFileDump {
             writer.endObject();
           }
           writer.endArray();
-
-          if (rowIndexCols != null && !rowIndexCols.isEmpty()) {
+          if (!rowIndexCols.isEmpty()) {
            // include only the columns that are specified; the bloom filter
            // is read only if the column is included
             boolean[] sargColumns = new boolean[colCount];

http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcFileDump.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcFileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcFileDump.java
index e184fcb..b1595ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcFileDump.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecOrcFileDump.java
@@ -98,7 +98,7 @@ public class PostExecOrcFileDump implements ExecuteWithHookContext {
              // just creating the ORC reader runs sanity checks to make sure it is a valid ORC file
               OrcFile.createReader(fs, fileStatus.getPath());
               console.printError("-- BEGIN ORC FILE DUMP --");
-              FileDump.main(new String[]{fileStatus.getPath().toString(), "--rowindex=1"});
+              FileDump.main(new String[]{fileStatus.getPath().toString(), "--rowindex=*"});
               console.printError("-- END ORC FILE DUMP --");
             } catch (FileFormatException e) {
               LOG.warn("File " + fileStatus.getPath() + " is not ORC. Skip printing orc file dump");

http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/ql/src/test/results/clientpositive/orc_file_dump.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_file_dump.q.out b/ql/src/test/results/clientpositive/orc_file_dump.q.out
index a97f6de..4cc8e3b 100644
--- a/ql/src/test/results/clientpositive/orc_file_dump.q.out
+++ b/ql/src/test/results/clientpositive/orc_file_dump.q.out
@@ -184,6 +184,13 @@ Stripes:
     Encoding column 9: DIRECT_V2
     Encoding column 10: DIRECT_V2
     Encoding column 11: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 1000 hasNull: false positions: 
+      Entry 1: count: 49 hasNull: false positions: 
+    Bloom filters for column 0:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
     Row group indices for column 1:
       Entry 0: count: 997 hasNull: true min: -3 max: 124 sum: 59325 positions: 0,0,0,0,0,0,0
       Entry 1: count: 49 hasNull: false min: 2 max: 123 sum: 3105 positions: 0,10,113,0,0,903,101
@@ -191,6 +198,76 @@ Stripes:
       Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 492 loadFactor: 0.0784 expectedFpp: 3.7864847E-5
       Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 168 loadFactor: 0.0268 expectedFpp: 5.147697E-7
       Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 492 loadFactor: 0.0784 expectedFpp: 3.7864847E-5
+    Row group indices for column 2:
+      Entry 0: count: 997 hasNull: true min: 256 max: 511 sum: 381766 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 256 max: 507 sum: 17123 positions: 0,10,114,0,0,1026,485
+    Bloom filters for column 2:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 928 loadFactor: 0.148 expectedFpp: 4.792562E-4
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 175 loadFactor: 0.0279 expectedFpp: 6.0607726E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 928 loadFactor: 0.148 expectedFpp: 4.792562E-4
+    Row group indices for column 3:
+      Entry 0: count: 1000 hasNull: false min: 65536 max: 65791 sum: 65662251 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: 65540 max: 65788 sum: 3218800 positions: 0,1538,488
+    Bloom filters for column 3:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 947 loadFactor: 0.151 expectedFpp: 5.1972753E-4
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 171 loadFactor: 0.0273 expectedFpp: 5.525356E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 951 loadFactor: 0.1516 expectedFpp: 5.285643E-4
+    Row group indices for column 4:
+      Entry 0: count: 1000 hasNull: false min: 4294967296 max: 4294967551 sum: 4294967421966 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: 4294967301 max: 4294967550 sum: 210453403987 positions: 0,2885,422
+    Bloom filters for column 4:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 900 loadFactor: 0.1435 expectedFpp: 4.2398053E-4
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 177 loadFactor: 0.0282 expectedFpp: 6.342622E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 905 loadFactor: 0.1443 expectedFpp: 4.3348098E-4
+    Row group indices for column 5:
+      Entry 0: count: 1000 hasNull: false min: 0.07999999821186066 max: 99.91999816894531 sum: 49977.420025609434 positions: 0,0
+      Entry 1: count: 49 hasNull: false min: 2.7899999618530273 max: 97.6500015258789 sum: 2767.280002593994 positions: 0,4000
+    Bloom filters for column 5:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 2846 loadFactor: 0.4538 expectedFpp: 0.042395078
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 192 loadFactor: 0.0306 expectedFpp: 8.781743E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 2940 loadFactor: 0.4688 expectedFpp: 0.048279762
+    Row group indices for column 6:
+      Entry 0: count: 1000 hasNull: false min: 0.02 max: 49.85 sum: 25045.26999999998 positions: 0,0
+      Entry 1: count: 49 hasNull: false min: 1.71 max: 48.78 sum: 1241.08 positions: 0,8000
+    Bloom filters for column 6:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 2696 loadFactor: 0.4298 expectedFpp: 0.03413936
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 195 loadFactor: 0.0311 expectedFpp: 9.3436E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 2792 loadFactor: 0.4452 expectedFpp: 0.03926788
+    Row group indices for column 7:
+      Entry 0: count: 1000 hasNull: false true: 501 positions: 0,0,0,0
+      Entry 1: count: 49 hasNull: false true: 25 positions: 0,0,125,0
+    Bloom filters for column 7:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+    Row group indices for column 8:
+      Entry 0: count: 1000 hasNull: false min:  max: zach zipper sum: 12798 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: alice allen max: zach xylophone sum: 645 positions: 0,1026,488
+    Bloom filters for column 8:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 1735 loadFactor: 0.2766 expectedFpp: 0.0058556325
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 182 loadFactor: 0.029 expectedFpp: 7.090246E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 1772 loadFactor: 0.2825 expectedFpp: 0.0063713384
+    Row group indices for column 9:
+      Entry 0: count: 1000 hasNull: false min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 positions: 0,0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 positions: 0,7,488,0,1538,488
+    Bloom filters for column 9:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 4 loadFactor: 0.0006 expectedFpp: 1.6543056E-13
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 4 loadFactor: 0.0006 expectedFpp: 1.6543056E-13
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 4 loadFactor: 0.0006 expectedFpp: 1.6543056E-13
+    Row group indices for column 10:
+      Entry 0: count: 1000 hasNull: false min: 0.08 max: 99.94 sum: 51182.11 positions: 0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 2.48 max: 94.9 sum: 2464.05 positions: 0,2159,0,476,4
+    Bloom filters for column 10:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 2848 loadFactor: 0.4541 expectedFpp: 0.042514365
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 194 loadFactor: 0.0309 expectedFpp: 9.153406E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 2952 loadFactor: 0.4707 expectedFpp: 0.049072847
+    Row group indices for column 11:
+      Entry 0: count: 1000 hasNull: false sum: 12674 positions: 0,0,0,0,0
+      Entry 1: count: 49 hasNull: false sum: 604 positions: 0,12674,0,979,20
+    Bloom filters for column 11:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 102 loadFactor: 0.0163 expectedFpp: 6.9948186E-8
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 98 loadFactor: 0.0156 expectedFpp: 5.9604645E-8
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 102 loadFactor: 0.0163 expectedFpp: 6.9948186E-8
 
 File length: 33416 bytes
 Padding length: 0 bytes
@@ -304,6 +381,13 @@ Stripes:
     Encoding column 9: DIRECT_V2
     Encoding column 10: DIRECT_V2
     Encoding column 11: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 1000 hasNull: false positions: 
+      Entry 1: count: 49 hasNull: false positions: 
+    Bloom filters for column 0:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 0 loadFactor: 0 expectedFpp: 0.0
     Row group indices for column 1:
       Entry 0: count: 997 hasNull: true min: -3 max: 124 sum: 59325 positions: 0,0,0,0,0,0,0
       Entry 1: count: 49 hasNull: false min: 2 max: 123 sum: 3105 positions: 0,10,113,0,0,903,101
@@ -311,6 +395,76 @@ Stripes:
       Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 849 loadFactor: 0.0884 expectedFpp: 4.231118E-8
       Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 285 loadFactor: 0.0297 expectedFpp: 2.0324289E-11
       Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 849 loadFactor: 0.0884 expectedFpp: 4.231118E-8
+    Row group indices for column 2:
+      Entry 0: count: 997 hasNull: true min: 256 max: 511 sum: 381766 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 256 max: 507 sum: 17123 positions: 0,10,114,0,0,1026,485
+    Bloom filters for column 2:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 1617 loadFactor: 0.1684 expectedFpp: 3.8465505E-6
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 306 loadFactor: 0.0319 expectedFpp: 3.343115E-11
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 1617 loadFactor: 0.1684 expectedFpp: 3.8465505E-6
+    Row group indices for column 3:
+      Entry 0: count: 1000 hasNull: false min: 65536 max: 65791 sum: 65662251 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: 65540 max: 65788 sum: 3218800 positions: 0,1538,488
+    Bloom filters for column 3:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 1632 loadFactor: 0.17 expectedFpp: 4.1033873E-6
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 296 loadFactor: 0.0308 expectedFpp: 2.6493746E-11
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 1638 loadFactor: 0.1706 expectedFpp: 4.2101606E-6
+    Row group indices for column 4:
+      Entry 0: count: 1000 hasNull: false min: 4294967296 max: 4294967551 sum: 4294967421966 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: 4294967301 max: 4294967550 sum: 210453403987 positions: 0,2885,422
+    Bloom filters for column 4:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 1611 loadFactor: 0.1678 expectedFpp: 3.7477455E-6
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 318 loadFactor: 0.0331 expectedFpp: 4.3761383E-11
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 1622 loadFactor: 0.169 expectedFpp: 3.9305864E-6
+    Row group indices for column 5:
+      Entry 0: count: 1000 hasNull: false min: 0.07999999821186066 max: 99.91999816894531 sum: 49977.420025609434 positions: 0,0
+      Entry 1: count: 49 hasNull: false min: 2.7899999618530273 max: 97.6500015258789 sum: 2767.280002593994 positions: 0,4000
+    Bloom filters for column 5:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 4847 loadFactor: 0.5049 expectedFpp: 0.008363968
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 341 loadFactor: 0.0355 expectedFpp: 7.134803E-11
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 4989 loadFactor: 0.5197 expectedFpp: 0.010237543
+    Row group indices for column 6:
+      Entry 0: count: 1000 hasNull: false min: 0.02 max: 49.85 sum: 25045.26999999998 positions: 0,0
+      Entry 1: count: 49 hasNull: false min: 1.71 max: 48.78 sum: 1241.08 positions: 0,8000
+    Bloom filters for column 6:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 4562 loadFactor: 0.4752 expectedFpp: 0.005472533
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 337 loadFactor: 0.0351 expectedFpp: 6.569173E-11
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 4710 loadFactor: 0.4906 expectedFpp: 0.006843018
+    Row group indices for column 7:
+      Entry 0: count: 1000 hasNull: false true: 501 positions: 0,0,0,0
+      Entry 1: count: 49 hasNull: false true: 25 positions: 0,0,125,0
+    Bloom filters for column 7:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+    Row group indices for column 8:
+      Entry 0: count: 1000 hasNull: false min:  max: zach zipper sum: 12798 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: alice allen max: zach xylophone sum: 645 positions: 0,1026,488
+    Bloom filters for column 8:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 2961 loadFactor: 0.3084 expectedFpp: 2.655646E-4
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 317 loadFactor: 0.033 expectedFpp: 4.2807122E-11
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 3015 loadFactor: 0.3141 expectedFpp: 3.0137875E-4
+    Row group indices for column 9:
+      Entry 0: count: 1000 hasNull: false min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 positions: 0,0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 positions: 0,7,488,0,1538,488
+    Bloom filters for column 9:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 7 loadFactor: 0.0007 expectedFpp: 1.0959422E-22
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 7 loadFactor: 0.0007 expectedFpp: 1.0959422E-22
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 7 loadFactor: 0.0007 expectedFpp: 1.0959422E-22
+    Row group indices for column 10:
+      Entry 0: count: 1000 hasNull: false min: 0.08 max: 99.94 sum: 51182.11 positions: 0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 2.48 max: 94.9 sum: 2464.05 positions: 0,2159,0,476,4
+    Bloom filters for column 10:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 4796 loadFactor: 0.4996 expectedFpp: 0.0077670407
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 339 loadFactor: 0.0353 expectedFpp: 6.846983E-11
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 4947 loadFactor: 0.5153 expectedFpp: 0.009649276
+    Row group indices for column 11:
+      Entry 0: count: 1000 hasNull: false sum: 12674 positions: 0,0,0,0,0
+      Entry 1: count: 49 hasNull: false sum: 604 positions: 0,12674,0,979,20
+    Bloom filters for column 11:
+      Entry 0: numHashFunctions: 7 bitCount: 9600 popCount: 181 loadFactor: 0.0189 expectedFpp: 8.4693775E-13
+      Entry 1: numHashFunctions: 7 bitCount: 9600 popCount: 174 loadFactor: 0.0181 expectedFpp: 6.426078E-13
+      Stripe level merge: numHashFunctions: 7 bitCount: 9600 popCount: 181 loadFactor: 0.0189 expectedFpp: 8.4693775E-13
 
 File length: 38568 bytes
 Padding length: 0 bytes
@@ -436,6 +590,13 @@ Stripes:
     Encoding column 9: DIRECT_V2
     Encoding column 10: DIRECT_V2
     Encoding column 11: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 1000 hasNull: false positions: 
+      Entry 1: count: 49 hasNull: false positions: 
+    Bloom filters for column 0:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
     Row group indices for column 1:
       Entry 0: count: 997 hasNull: true min: -3 max: 124 sum: 59325 positions: 0,0,0,0,0,0,0
       Entry 1: count: 49 hasNull: false min: 2 max: 123 sum: 3105 positions: 0,10,113,0,0,903,101
@@ -443,6 +604,76 @@ Stripes:
       Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 492 loadFactor: 0.0784 expectedFpp: 3.7864847E-5
       Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 168 loadFactor: 0.0268 expectedFpp: 5.147697E-7
       Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 492 loadFactor: 0.0784 expectedFpp: 3.7864847E-5
+    Row group indices for column 2:
+      Entry 0: count: 997 hasNull: true min: 256 max: 511 sum: 381766 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 256 max: 507 sum: 17123 positions: 0,10,114,0,0,1026,485
+    Bloom filters for column 2:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 928 loadFactor: 0.148 expectedFpp: 4.792562E-4
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 175 loadFactor: 0.0279 expectedFpp: 6.0607726E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 928 loadFactor: 0.148 expectedFpp: 4.792562E-4
+    Row group indices for column 3:
+      Entry 0: count: 1000 hasNull: false min: 65536 max: 65791 sum: 65662251 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: 65540 max: 65788 sum: 3218800 positions: 0,1538,488
+    Bloom filters for column 3:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 947 loadFactor: 0.151 expectedFpp: 5.1972753E-4
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 171 loadFactor: 0.0273 expectedFpp: 5.525356E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 951 loadFactor: 0.1516 expectedFpp: 5.285643E-4
+    Row group indices for column 4:
+      Entry 0: count: 1000 hasNull: false min: 4294967296 max: 4294967551 sum: 4294967421966 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: 4294967301 max: 4294967550 sum: 210453403987 positions: 0,2885,422
+    Bloom filters for column 4:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 900 loadFactor: 0.1435 expectedFpp: 4.2398053E-4
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 177 loadFactor: 0.0282 expectedFpp: 6.342622E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 905 loadFactor: 0.1443 expectedFpp: 4.3348098E-4
+    Row group indices for column 5:
+      Entry 0: count: 1000 hasNull: false min: 0.07999999821186066 max: 99.91999816894531 sum: 49977.420025609434 positions: 0,0
+      Entry 1: count: 49 hasNull: false min: 2.7899999618530273 max: 97.6500015258789 sum: 2767.280002593994 positions: 0,4000
+    Bloom filters for column 5:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 2846 loadFactor: 0.4538 expectedFpp: 0.042395078
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 192 loadFactor: 0.0306 expectedFpp: 8.781743E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 2940 loadFactor: 0.4688 expectedFpp: 0.048279762
+    Row group indices for column 6:
+      Entry 0: count: 1000 hasNull: false min: 0.02 max: 49.85 sum: 25045.26999999998 positions: 0,0
+      Entry 1: count: 49 hasNull: false min: 1.71 max: 48.78 sum: 1241.08 positions: 0,8000
+    Bloom filters for column 6:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 2696 loadFactor: 0.4298 expectedFpp: 0.03413936
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 195 loadFactor: 0.0311 expectedFpp: 9.3436E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 2792 loadFactor: 0.4452 expectedFpp: 0.03926788
+    Row group indices for column 7:
+      Entry 0: count: 1000 hasNull: false true: 501 positions: 0,0,0,0
+      Entry 1: count: 49 hasNull: false true: 25 positions: 0,0,125,0
+    Bloom filters for column 7:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 0 loadFactor: 0 expectedFpp: 0.0
+    Row group indices for column 8:
+      Entry 0: count: 1000 hasNull: false min:  max: zach zipper sum: 12798 positions: 0,0,0
+      Entry 1: count: 49 hasNull: false min: alice allen max: zach xylophone sum: 645 positions: 0,1026,488
+    Bloom filters for column 8:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 1735 loadFactor: 0.2766 expectedFpp: 0.0058556325
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 182 loadFactor: 0.029 expectedFpp: 7.090246E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 1772 loadFactor: 0.2825 expectedFpp: 0.0063713384
+    Row group indices for column 9:
+      Entry 0: count: 1000 hasNull: false min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 positions: 0,0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 2013-03-01 09:11:58.703 max: 2013-03-01 09:11:58.703 positions: 0,7,488,0,1538,488
+    Bloom filters for column 9:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 4 loadFactor: 0.0006 expectedFpp: 1.6543056E-13
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 4 loadFactor: 0.0006 expectedFpp: 1.6543056E-13
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 4 loadFactor: 0.0006 expectedFpp: 1.6543056E-13
+    Row group indices for column 10:
+      Entry 0: count: 1000 hasNull: false min: 0.08 max: 99.94 sum: 51182.11 positions: 0,0,0,0,0
+      Entry 1: count: 49 hasNull: false min: 2.48 max: 94.9 sum: 2464.05 positions: 0,2159,0,476,4
+    Bloom filters for column 10:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 2848 loadFactor: 0.4541 expectedFpp: 0.042514365
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 194 loadFactor: 0.0309 expectedFpp: 9.153406E-7
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 2952 loadFactor: 0.4707 expectedFpp: 0.049072847
+    Row group indices for column 11:
+      Entry 0: count: 1000 hasNull: false sum: 12674 positions: 0,0,0,0,0
+      Entry 1: count: 49 hasNull: false sum: 604 positions: 0,12674,0,979,20
+    Bloom filters for column 11:
+      Entry 0: numHashFunctions: 4 bitCount: 6272 popCount: 102 loadFactor: 0.0163 expectedFpp: 6.9948186E-8
+      Entry 1: numHashFunctions: 4 bitCount: 6272 popCount: 98 loadFactor: 0.0156 expectedFpp: 5.9604645E-8
+      Stripe level merge: numHashFunctions: 4 bitCount: 6272 popCount: 102 loadFactor: 0.0163 expectedFpp: 6.9948186E-8
 
 File length: 33416 bytes
 Padding length: 0 bytes
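
Note: the expectedFpp figures in the dumps above follow directly from the
other bloom-filter fields; every entry printed here satisfies
loadFactor = popCount / bitCount and expectedFpp = loadFactor ^ numHashFunctions.
A minimal Java sketch reproducing two of the values (class and method names
are illustrative only, not part of this patch):

    public class BloomFilterFppCheck {
        // expectedFpp = (popCount / bitCount) ^ numHashFunctions
        static double expectedFpp(long popCount, long bitCount, int numHashFunctions) {
            double loadFactor = (double) popCount / bitCount;
            return Math.pow(loadFactor, numHashFunctions);
        }

        public static void main(String[] args) {
            // First bloom-filter list above, Entry 0: 849/9600 bits, 7 hashes
            System.out.println(expectedFpp(849, 9600, 7));   // ~4.231118E-8
            // Column 2, Entry 0: 1617/9600 bits, 7 hashes
            System.out.println(expectedFpp(1617, 9600, 7));  // ~3.8465505E-6
        }
    }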

http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/ql/src/test/results/clientpositive/orc_merge10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge10.q.out b/ql/src/test/results/clientpositive/orc_merge10.q.out
index cf70dcf..1d64ae5 100644
--- a/ql/src/test/results/clientpositive/orc_merge10.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge10.q.out
@@ -550,8 +550,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DICTIONARY_V2[114]
+    Row group indices for column 0:
+      Entry 0: count: 152 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 152 hasNull: false min: 0 max: 497 sum: 38034 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 152 hasNull: false min: val_0 max: val_97 sum: 1034 positions: 0,0,0
   Stripe: offset: 1140 data: 616 rows: 90 tail: 61 index: 76
     Stream: column 0 section ROW_INDEX start: 1140 length 11
     Stream: column 1 section ROW_INDEX start: 1151 length 27
@@ -562,8 +566,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 90 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 90 hasNull: false min: 0 max: 495 sum: 22736 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 positions: 0,0,0,0,0
 
 File length: 2137 bytes
 Padding length: 0 bytes
@@ -612,8 +620,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DICTIONARY_V2[114]
+    Row group indices for column 0:
+      Entry 0: count: 152 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 152 hasNull: false min: 0 max: 497 sum: 38034 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 152 hasNull: false min: val_0 max: val_97 sum: 1034 positions: 0,0,0
   Stripe: offset: 1140 data: 616 rows: 90 tail: 61 index: 76
     Stream: column 0 section ROW_INDEX start: 1140 length 11
     Stream: column 1 section ROW_INDEX start: 1151 length 27
@@ -624,8 +636,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 90 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 90 hasNull: false min: 0 max: 495 sum: 22736 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 positions: 0,0,0,0,0
 
 File length: 2137 bytes
 Padding length: 0 bytes

http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/ql/src/test/results/clientpositive/orc_merge11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge11.q.out b/ql/src/test/results/clientpositive/orc_merge11.q.out
index 8a4d8e9..5143a6a 100644
--- a/ql/src/test/results/clientpositive/orc_merge11.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge11.q.out
@@ -118,12 +118,42 @@ Stripes:
     Encoding column 3: DIRECT
     Encoding column 4: DIRECT_V2
     Encoding column 5: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 10000 hasNull: false positions: 
+      Entry 2: count: 10000 hasNull: false positions: 
+      Entry 3: count: 10000 hasNull: false positions: 
+      Entry 4: count: 10000 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
       Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
       Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
       Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
       Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+    Row group indices for column 2:
+      Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
+      Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
+      Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
+      Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+    Row group indices for column 3:
+      Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
+      Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
+      Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
+      Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
+      Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+    Row group indices for column 4:
+      Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+    Row group indices for column 5:
+      Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 positions: 0,164,391,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,336,391,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 positions: 0,508,391,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 positions: 0,680,391,0,312,64
 
 File length: 6849 bytes
 Padding length: 0 bytes
@@ -179,12 +209,42 @@ Stripes:
     Encoding column 3: DIRECT
     Encoding column 4: DIRECT_V2
     Encoding column 5: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 10000 hasNull: false positions: 
+      Entry 2: count: 10000 hasNull: false positions: 
+      Entry 3: count: 10000 hasNull: false positions: 
+      Entry 4: count: 10000 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
       Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
       Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
       Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
       Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+    Row group indices for column 2:
+      Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
+      Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
+      Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
+      Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+    Row group indices for column 3:
+      Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
+      Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
+      Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
+      Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
+      Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+    Row group indices for column 4:
+      Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+    Row group indices for column 5:
+      Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 positions: 0,164,391,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,336,391,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 positions: 0,508,391,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 positions: 0,680,391,0,312,64
 
 File length: 6849 bytes
 Padding length: 0 bytes
@@ -270,12 +330,42 @@ Stripes:
     Encoding column 3: DIRECT
     Encoding column 4: DIRECT_V2
     Encoding column 5: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 10000 hasNull: false positions: 
+      Entry 2: count: 10000 hasNull: false positions: 
+      Entry 3: count: 10000 hasNull: false positions: 
+      Entry 4: count: 10000 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
       Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
       Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
       Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
       Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+    Row group indices for column 2:
+      Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
+      Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
+      Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
+      Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+    Row group indices for column 3:
+      Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
+      Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
+      Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
+      Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
+      Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+    Row group indices for column 4:
+      Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+    Row group indices for column 5:
+      Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 positions: 0,164,391,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,336,391,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 positions: 0,508,391,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 positions: 0,680,391,0,312,64
   Stripe: offset: 6511 data: 5897 rows: 50000 tail: 113 index: 498
     Stream: column 0 section ROW_INDEX start: 6511 length 17
     Stream: column 1 section ROW_INDEX start: 6528 length 83
@@ -298,12 +388,42 @@ Stripes:
     Encoding column 3: DIRECT
     Encoding column 4: DIRECT_V2
     Encoding column 5: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 10000 hasNull: false positions: 
+      Entry 2: count: 10000 hasNull: false positions: 
+      Entry 3: count: 10000 hasNull: false positions: 
+      Entry 4: count: 10000 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
       Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
       Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
       Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
       Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+    Row group indices for column 2:
+      Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
+      Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
+      Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
+      Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+    Row group indices for column 3:
+      Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
+      Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
+      Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
+      Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
+      Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+    Row group indices for column 4:
+      Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+    Row group indices for column 5:
+      Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 positions: 0,164,391,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,336,391,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 positions: 0,508,391,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 positions: 0,680,391,0,312,64
 
 File length: 13369 bytes
 Padding length: 0 bytes

http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/ql/src/test/results/clientpositive/orc_merge12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge12.q.out b/ql/src/test/results/clientpositive/orc_merge12.q.out
index f23be5a..6a86fcf 100644
--- a/ql/src/test/results/clientpositive/orc_merge12.q.out
+++ b/ql/src/test/results/clientpositive/orc_merge12.q.out
@@ -429,9 +429,117 @@ Stripes:
     Encoding column 34: DIRECT_V2
     Encoding column 35: DIRECT
     Encoding column 36: DIRECT
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 2288 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
       Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 2:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 3:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 4:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 5:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 6:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 7:
+      Entry 0: count: 10000 hasNull: false min: 00020767-dd8f-4f4d-bd68-4b7be64b8e44 max: fffa3516-e219-4027-b0d3-72bb2e676c52 sum: 360000 positions: 0,0,0,0,0
+      Entry 1: count: 2288 hasNull: false min: 002d8ccb-a094-4d10-b283-999770cf8488 max: ffacef94-41da-4230-807a-509bbf50b057 sum: 82368 positions: 153190,97856,0,9766,272
+    Row group indices for column 8:
+      Entry 0: count: 10000 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 720000 positions: 0,0,0,0,0
+      Entry 1: count: 2288 hasNull: false min: 00124556-8383-44c4-a28b-7a413de74ccc4137606f-2cf7-43fb-beff-b6d374fd15ec max: ffde3bce-bb56-4fa9-81d7-146ca2eab946225c18e0-0002-4d07-9853-12c92c0f5637 sum: 164736 positions: 306445,195712,0,9766,272
+    Row group indices for column 9:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 10:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 11:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 12:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
+    Row group indices for column 13:
+      Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 14:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 15:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 16:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 17:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 18:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 19:
+      Entry 0: count: 7140 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yxN0212hM17E8J8bJj8D7b sum: 99028 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: 006bb3K max: yy2GiGM sum: 28853 positions: 0,126,98,0,0,14308,0
+    Row group indices for column 20:
+      Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
+    Row group indices for column 21:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 22:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 23:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 24:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
+    Row group indices for column 25:
+      Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 26:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 27:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 28:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 29:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 30:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 31:
+      Entry 0: count: 7140 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yxN0212hM17E8J8bJj8D7b sum: 99028 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: 006bb3K max: yy2GiGM sum: 28853 positions: 0,126,98,0,0,14308,0
+    Row group indices for column 32:
+      Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
+    Row group indices for column 33:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 34:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 35:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 36:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
   Stripe: offset: 1503357 data: 1500017 rows: 12288 tail: 501 index: 2836
     Stream: column 0 section ROW_INDEX start: 1503357 length 21
     Stream: column 1 section ROW_INDEX start: 1503378 length 53
@@ -593,9 +701,117 @@ Stripes:
     Encoding column 34: DIRECT_V2
     Encoding column 35: DIRECT
     Encoding column 36: DIRECT
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 2288 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
       Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 2:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 3:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 4:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 5:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 6:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 7:
+      Entry 0: count: 10000 hasNull: false min: 00020767-dd8f-4f4d-bd68-4b7be64b8e44 max: fffa3516-e219-4027-b0d3-72bb2e676c52 sum: 360000 positions: 0,0,0,0,0
+      Entry 1: count: 2288 hasNull: false min: 002d8ccb-a094-4d10-b283-999770cf8488 max: ffacef94-41da-4230-807a-509bbf50b057 sum: 82368 positions: 153190,97856,0,9766,272
+    Row group indices for column 8:
+      Entry 0: count: 10000 hasNull: false min: 000976f7-7075-4f3f-a564-5a375fafcc101416a2b7-7f64-41b7-851f-97d15405037e max: fffd0642-5f01-48cd-8d97-3428faee49e9b39f2b4c-efdc-4e5f-9ab5-4aa5394cb156 sum: 720000 positions: 0,0,0,0,0
+      Entry 1: count: 2288 hasNull: false min: 00124556-8383-44c4-a28b-7a413de74ccc4137606f-2cf7-43fb-beff-b6d374fd15ec max: ffde3bce-bb56-4fa9-81d7-146ca2eab946225c18e0-0002-4d07-9853-12c92c0f5637 sum: 164736 positions: 306445,195712,0,9766,272
+    Row group indices for column 9:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 10:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 11:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 12:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
+    Row group indices for column 13:
+      Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 14:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 15:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 16:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 17:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 18:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 19:
+      Entry 0: count: 7140 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yxN0212hM17E8J8bJj8D7b sum: 99028 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: 006bb3K max: yy2GiGM sum: 28853 positions: 0,126,98,0,0,14308,0
+    Row group indices for column 20:
+      Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
+    Row group indices for column 21:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 22:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 23:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 24:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
+    Row group indices for column 25:
+      Entry 0: count: 7909 hasNull: true min: -64 max: 62 sum: -50203 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64 max: 62 sum: 10347 positions: 0,182,99,0,0,5937,2
+    Row group indices for column 26:
+      Entry 0: count: 7924 hasNull: true min: -16379 max: 16376 sum: 9298530 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309 max: 16331 sum: -1862540 positions: 0,126,96,0,0,15334,272
+    Row group indices for column 27:
+      Entry 0: count: 7139 hasNull: true min: -1073051226 max: 1073680599 sum: 1417841516466 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: -1073279343 max: 1072872630 sum: 20209347319 positions: 0,128,98,0,0,28584,0
+    Row group indices for column 28:
+      Entry 0: count: 6889 hasNull: true min: -2147311592 max: 2144325818 sum: -24788202148 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: -2144905793 max: 2145498388 sum: -1673671826261 positions: 0,168,7,0,0,26534,262
+    Row group indices for column 29:
+      Entry 0: count: 7909 hasNull: true min: -64.0 max: 79.5530014038086 sum: -49823.35599219799 positions: 0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: -64.0 max: 62.0 sum: 10343.719999313354 positions: 0,182,99,0,0,31636
+    Row group indices for column 30:
+      Entry 0: count: 7924 hasNull: true min: -16379.0 max: 9763215.5639 sum: 4.8325701237600006E7 positions: 0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: -16309.0 max: 9763215.5639 sum: 7897951.792899999 positions: 0,126,96,0,0,63392
+    Row group indices for column 31:
+      Entry 0: count: 7140 hasNull: true min: 0042l0d5rPD6sMlJ7Ue0q max: yxN0212hM17E8J8bJj8D7b sum: 99028 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true min: 006bb3K max: yy2GiGM sum: 28853 positions: 0,126,98,0,0,14308,0
+    Row group indices for column 32:
+      Entry 0: count: 6889 hasNull: true min: 0034fkcXMQI3 max: yyt0S8WorA sum: 109415 positions: 0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true min: 004J8y max: yjDBo sum: 39719 positions: 0,168,8,0,0,13280,262
+    Row group indices for column 33:
+      Entry 0: count: 7909 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1264 hasNull: true min: 1969-12-31 13:59:43.64 max: 1969-12-31 14:00:30.808 positions: 0,182,100,0,0,30619,258,0,15332,258
+    Row group indices for column 34:
+      Entry 0: count: 7924 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,0,0,0,0,0,0,0,0,0
+      Entry 1: count: 1250 hasNull: true min: 1969-12-31 13:59:30.929 max: 1969-12-31 14:00:30.808 positions: 0,126,97,0,0,30619,273,0,15334,272
+    Row group indices for column 35:
+      Entry 0: count: 7140 hasNull: true true: 5115 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2034 hasNull: true true: 1023 positions: 0,126,98,0,0,520,126,4
+    Row group indices for column 36:
+      Entry 0: count: 6889 hasNull: true true: 3402 positions: 0,0,0,0,0,0,0,0
+      Entry 1: count: 2284 hasNull: true true: 581 positions: 0,168,8,0,0,520,97,1
 
 File length: 3007981 bytes
 Padding length: 0 bytes
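
Note: the two bitCount/numHashFunctions pairings seen in these dumps (9600
bits with 7 hash functions vs. 6272 bits with 4) are consistent with the
textbook bloom-filter sizing formulas for roughly 1000 values per row group
at false-positive targets of 0.01 and 0.05 (the latter being the usual
orc.bloom.filter.fpp default). A sketch under those assumptions, rounding the
bit count up to whole 64-bit words as the dumps imply (names illustrative):

    public class BloomFilterSizing {
        // m = -n * ln(p) / (ln 2)^2, rounded up to a multiple of 64 bits
        static long optimalNumBits(long n, double fpp) {
            long bits = (long) (-n * Math.log(fpp) / (Math.log(2) * Math.log(2)));
            return ((bits + 63) / 64) * 64;
        }

        // k = (m / n) * ln 2, rounded to the nearest whole hash function
        static int optimalNumHashFunctions(long n, long numBits) {
            return Math.max(1, (int) Math.round((double) numBits / n * Math.log(2)));
        }

        public static void main(String[] args) {
            System.out.println(optimalNumBits(1000, 0.01));           // 9600
            System.out.println(optimalNumHashFunctions(1000, 9600));  // 7
            System.out.println(optimalNumBits(1000, 0.05));           // 6272
            System.out.println(optimalNumHashFunctions(1000, 6272));  // 4
        }
    }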

http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/ql/src/test/results/clientpositive/tez/orc_merge10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_merge10.q.out b/ql/src/test/results/clientpositive/tez/orc_merge10.q.out
index bcba1bd..2e16da0 100644
--- a/ql/src/test/results/clientpositive/tez/orc_merge10.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_merge10.q.out
@@ -588,8 +588,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 90 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 90 hasNull: false min: 0 max: 495 sum: 22736 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 positions: 0,0,0,0,0
   Stripe: offset: 756 data: 544 rows: 78 tail: 61 index: 76
     Stream: column 0 section ROW_INDEX start: 756 length 11
     Stream: column 1 section ROW_INDEX start: 767 length 27
@@ -600,8 +604,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 78 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 78 hasNull: false min: 0 max: 497 sum: 18371 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 78 hasNull: false min: val_0 max: val_95 sum: 529 positions: 0,0,0,0,0
   Stripe: offset: 1437 data: 519 rows: 74 tail: 61 index: 78
     Stream: column 0 section ROW_INDEX start: 1437 length 11
     Stream: column 1 section ROW_INDEX start: 1448 length 27
@@ -612,8 +620,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 74 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 74 hasNull: false min: 2 max: 493 sum: 19663 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 74 hasNull: false min: val_105 max: val_97 sum: 505 positions: 0,0,0,0,0
 
 File length: 2393 bytes
 Padding length: 0 bytes
@@ -665,8 +677,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 90 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 90 hasNull: false min: 0 max: 495 sum: 22736 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 90 hasNull: false min: val_0 max: val_86 sum: 612 positions: 0,0,0,0,0
   Stripe: offset: 756 data: 544 rows: 78 tail: 61 index: 76
     Stream: column 0 section ROW_INDEX start: 756 length 11
     Stream: column 1 section ROW_INDEX start: 767 length 27
@@ -677,8 +693,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 78 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 78 hasNull: false min: 0 max: 497 sum: 18371 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 78 hasNull: false min: val_0 max: val_95 sum: 529 positions: 0,0,0,0,0
   Stripe: offset: 1437 data: 519 rows: 74 tail: 61 index: 78
     Stream: column 0 section ROW_INDEX start: 1437 length 11
     Stream: column 1 section ROW_INDEX start: 1448 length 27
@@ -689,8 +709,12 @@ Stripes:
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 74 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 74 hasNull: false min: 2 max: 493 sum: 19663 positions: 0,0,0
+    Row group indices for column 2:
+      Entry 0: count: 74 hasNull: false min: val_105 max: val_97 sum: 505 positions: 0,0,0,0,0
 
 File length: 2393 bytes
 Padding length: 0 bytes
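
Note: the "Stripe level merge" lines are consistent with the per-row-group
filters being OR-ed together bit for bit, so the merged popCount can only
grow. For example, the column 3 bloom filters in the first dump above merge
entries with popCounts 1632 and 296 into 1638, i.e. the second row group set
only six bits the first had not. A minimal sketch of that union, assuming
both filters share the same size and hash count (names and bit layout
illustrative):

    import java.util.BitSet;

    public class BloomFilterMerge {
        // Merging equal-sized filters with the same hash functions is a
        // bitwise OR; the merged popCount is the union's cardinality.
        static BitSet merge(BitSet a, BitSet b) {
            BitSet merged = (BitSet) a.clone();
            merged.or(b);
            return merged;
        }

        public static void main(String[] args) {
            BitSet entry0 = new BitSet(9600);
            BitSet entry1 = new BitSet(9600);
            entry0.set(0, 1632);      // stand-in for a filter with popCount 1632
            entry1.set(1342, 1638);   // 296 bits, 6 of them beyond entry0
            System.out.println(merge(entry0, entry1).cardinality());  // 1638
        }
    }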

http://git-wip-us.apache.org/repos/asf/hive/blob/ebc5e6a7/ql/src/test/results/clientpositive/tez/orc_merge11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_merge11.q.out b/ql/src/test/results/clientpositive/tez/orc_merge11.q.out
index 8a4d8e9..5143a6a 100644
--- a/ql/src/test/results/clientpositive/tez/orc_merge11.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_merge11.q.out
@@ -118,12 +118,42 @@ Stripes:
     Encoding column 3: DIRECT
     Encoding column 4: DIRECT_V2
     Encoding column 5: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 10000 hasNull: false positions: 
+      Entry 2: count: 10000 hasNull: false positions: 
+      Entry 3: count: 10000 hasNull: false positions: 
+      Entry 4: count: 10000 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
       Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
       Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
       Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
       Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+    Row group indices for column 2:
+      Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
+      Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
+      Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
+      Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+    Row group indices for column 3:
+      Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
+      Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
+      Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
+      Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
+      Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+    Row group indices for column 4:
+      Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+    Row group indices for column 5:
+      Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 positions: 0,164,391,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,336,391,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 positions: 0,508,391,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 positions: 0,680,391,0,312,64
 
 File length: 6849 bytes
 Padding length: 0 bytes
@@ -179,12 +209,42 @@ Stripes:
     Encoding column 3: DIRECT
     Encoding column 4: DIRECT_V2
     Encoding column 5: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 10000 hasNull: false positions: 
+      Entry 2: count: 10000 hasNull: false positions: 
+      Entry 3: count: 10000 hasNull: false positions: 
+      Entry 4: count: 10000 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
       Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
       Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
       Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
       Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+    Row group indices for column 2:
+      Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
+      Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
+      Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
+      Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+    Row group indices for column 3:
+      Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
+      Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
+      Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
+      Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
+      Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+    Row group indices for column 4:
+      Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+    Row group indices for column 5:
+      Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 positions: 0,164,391,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,336,391,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 positions: 0,508,391,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 positions: 0,680,391,0,312,64
 
 File length: 6849 bytes
 Padding length: 0 bytes
@@ -270,12 +330,42 @@ Stripes:
     Encoding column 3: DIRECT
     Encoding column 4: DIRECT_V2
     Encoding column 5: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 10000 hasNull: false positions: 
+      Entry 2: count: 10000 hasNull: false positions: 
+      Entry 3: count: 10000 hasNull: false positions: 
+      Entry 4: count: 10000 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
       Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
       Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
       Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
       Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+    Row group indices for column 2:
+      Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
+      Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
+      Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
+      Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+    Row group indices for column 3:
+      Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
+      Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
+      Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
+      Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
+      Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+    Row group indices for column 4:
+      Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+    Row group indices for column 5:
+      Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 positions: 0,164,391,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,336,391,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 positions: 0,508,391,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 positions: 0,680,391,0,312,64
   Stripe: offset: 6511 data: 5897 rows: 50000 tail: 113 index: 498
     Stream: column 0 section ROW_INDEX start: 6511 length 17
     Stream: column 1 section ROW_INDEX start: 6528 length 83
@@ -298,12 +388,42 @@ Stripes:
     Encoding column 3: DIRECT
     Encoding column 4: DIRECT_V2
     Encoding column 5: DIRECT_V2
+    Row group indices for column 0:
+      Entry 0: count: 10000 hasNull: false positions: 
+      Entry 1: count: 10000 hasNull: false positions: 
+      Entry 2: count: 10000 hasNull: false positions: 
+      Entry 3: count: 10000 hasNull: false positions: 
+      Entry 4: count: 10000 hasNull: false positions: 
     Row group indices for column 1:
       Entry 0: count: 10000 hasNull: false min: 2 max: 100 sum: 999815 positions: 0,0,0
       Entry 1: count: 10000 hasNull: false min: 29 max: 100 sum: 999899 positions: 0,101,391
       Entry 2: count: 10000 hasNull: false min: 2 max: 100 sum: 999807 positions: 0,207,391
       Entry 3: count: 10000 hasNull: false min: 13 max: 100 sum: 999842 positions: 0,313,391
       Entry 4: count: 10000 hasNull: false min: 5 max: 100 sum: 999875 positions: 0,419,391
+    Row group indices for column 2:
+      Entry 0: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,0,0
+      Entry 1: count: 10000 hasNull: false min: cat max: zebra sum: 49996 positions: 0,82,391
+      Entry 2: count: 10000 hasNull: false min: eat max: zebra sum: 49996 positions: 0,168,391
+      Entry 3: count: 10000 hasNull: false min: bar max: zebra sum: 49996 positions: 0,254,391
+      Entry 4: count: 10000 hasNull: false min: dog max: zebra sum: 49996 positions: 0,340,391
+    Row group indices for column 3:
+      Entry 0: count: 10000 hasNull: false min: 0.8 max: 80.0 sum: 80064.8 positions: 0,0
+      Entry 1: count: 10000 hasNull: false min: 1.8 max: 8.0 sum: 79993.8 positions: 1002,2176
+      Entry 2: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79985.6 positions: 2053,256
+      Entry 3: count: 10000 hasNull: false min: 8.0 max: 80.0 sum: 80072.0 positions: 3067,2432
+      Entry 4: count: 10000 hasNull: false min: 0.8 max: 8.0 sum: 79986.6 positions: 4117,512
+    Row group indices for column 4:
+      Entry 0: count: 10000 hasNull: false min: 0 max: 2 sum: 3 positions: 0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 0 max: 4 sum: 7 positions: 83,1808,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 0 max: 6 sum: 7 positions: 167,3616,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 0 max: 3 sum: 5 positions: 290,1328,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 0 max: 6 sum: 10 positions: 380,3136,0,312,64
+    Row group indices for column 5:
+      Entry 0: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,0,0,0,0,0
+      Entry 1: count: 10000 hasNull: false min: 1969-12-31 16:00:10.0 max: 1969-12-31 16:04:10.0 positions: 0,164,391,0,76,272
+      Entry 2: count: 10000 hasNull: false min: 1969-12-31 16:00:00.0 max: 1969-12-31 16:04:10.0 positions: 0,336,391,0,156,32
+      Entry 3: count: 10000 hasNull: false min: 1969-12-31 16:00:05.0 max: 1969-12-31 16:04:10.0 positions: 0,508,391,0,232,304
+      Entry 4: count: 10000 hasNull: false min: 1969-12-31 16:00:15.0 max: 1969-12-31 16:04:10.0 positions: 0,680,391,0,312,64
 
 File length: 13369 bytes
 Padding length: 0 bytes
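
The indices added above record, for every column and every 10,000-row group, the value statistics (min/max/sum) plus the stream positions needed to seek directly to that group. Hive's ORC reader uses exactly these entries for predicate pushdown: row groups whose min/max range cannot satisfy a filter are skipped without being decompressed. A minimal HiveQL sketch, assuming a hypothetical ORC table t with an int column c and row-index filtering enabled:

    SET hive.optimize.index.filter=true;
    -- Any row group whose index entry shows max <= 100 for c is skipped,
    -- since the SearchArgument built from the predicate cannot match it.
    SELECT count(*) FROM t WHERE c > 100;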


[02/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
index a790b97..7ca9604 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -1348,12 +1348,12 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
@@ -1375,7 +1375,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1455,9 +1455,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
@@ -1499,7 +1499,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0) (type: double)
                   1 _col0 (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -1598,9 +1598,9 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
@@ -1610,7 +1610,7 @@ STAGE PLANS:
                      Inner Join 0 to 1
                 keys:
                   0 UDFToDouble(_col0) (type: double)
-                  1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1690,9 +1690,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      key expressions: (UDFToDouble(_col0) * 2.0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Map-reduce partition columns: (UDFToDouble(_col0) * 2.0) (type: double)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
@@ -1719,7 +1719,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  0 (UDFToDouble(_col0) * 2.0) (type: double)
                   1 _col0 (type: double)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -1813,9 +1813,9 @@ STAGE PLANS:
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      key expressions: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                       sort order: +
-                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
@@ -1857,7 +1857,7 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                  0 UDFToString((UDFToDouble(_col0) * 2.0)) (type: string)
                   1 UDFToString(_col0) (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -1923,6 +1923,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -1947,42 +1948,40 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      sort order: 
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    keys: '2008-04-08' (type: string)
-                    mode: hash
-                    outputColumnNames: _col0
+                  Select Operator
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: '2008-04-08' (type: string)
-                      sort order: +
-                      Map-reduce partition columns: '2008-04-08' (type: string)
+                    Group By Operator
+                      keys: '2008-04-08' (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col0 (type: string)
-                  1 '2008-04-08' (type: string)
-                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+                  0 
+                  1 
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -2011,32 +2010,15 @@ STAGE PLANS:
             Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
-                keys: '2008-04-08' (type: string)
+                keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: '2008-04-08' (type: string)
-                    sort order: +
-                    Map-reduce partition columns: '2008-04-08' (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    sort order: 
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Dynamic Partitioning Event Operator
-                        Target column: ds (string)
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -2044,21 +2026,18 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
@@ -2074,7 +2053,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- non-equi join
 EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
@@ -2171,7 +2150,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -4247,7 +4226,7 @@ STAGE PLANS:
                            Inner Join 0 to 1
                       keys:
                         0 UDFToDouble(_col0) (type: double)
-                        1 UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                       input vertices:
                         1 Map 3
                       Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
@@ -4275,12 +4254,12 @@ STAGE PLANS:
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         sort order: +
-                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        expressions: UDFToDouble(UDFToInteger((_col0 / UDFToDouble(2)))) (type: double)
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
@@ -4367,7 +4346,7 @@ STAGE PLANS:
                       condition map:
                            Inner Join 0 to 1
                       keys:
-                        0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                        0 (UDFToDouble(_col0) * 2.0) (type: double)
                         1 _col0 (type: double)
                       input vertices:
                         1 Map 3
@@ -4470,6 +4449,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
+Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -4485,57 +4465,68 @@ STAGE PLANS:
     Tez
 #### A masked pattern was here ####
       Edges:
-        Map 1 <- Reducer 4 (BROADCAST_EDGE)
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 4 <- Map 3 (SIMPLE_EDGE)
+        Reducer 3 <- Map 1 (BROADCAST_EDGE), Map 2 (SIMPLE_EDGE)
+        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Map Join Operator
-                      condition map:
-                           Inner Join 0 to 1
-                      keys:
-                        0 _col0 (type: string)
-                        1 '2008-04-08' (type: string)
-                      input vertices:
-                        1 Reducer 4
-                      Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-                      HybridGraceHashJoin: true
-                      Group By Operator
-                        aggregations: count()
-                        mode: hash
-                        outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          sort order: 
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col0 (type: bigint)
-        Map 3 
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+        Map 2 
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds = '2008-04-08') (type: boolean)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  Group By Operator
-                    keys: '2008-04-08' (type: string)
-                    mode: hash
-                    outputColumnNames: _col0
+                  Select Operator
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: '2008-04-08' (type: string)
-                      sort order: +
-                      Map-reduce partition columns: '2008-04-08' (type: string)
+                    Group By Operator
+                      keys: '2008-04-08' (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-        Reducer 2 
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+        Reducer 3 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    keys:
+                      0 
+                      1 
+                    input vertices:
+                      0 Map 1
+                    Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count()
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: bigint)
+        Reducer 4 
             Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
@@ -4550,36 +4541,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-        Reducer 4 
-            Execution mode: vectorized
-            Reduce Operator Tree:
-              Group By Operator
-                keys: '2008-04-08' (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: '2008-04-08' (type: string)
-                    sort order: +
-                    Map-reduce partition columns: '2008-04-08' (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Dynamic Partitioning Event Operator
-                        Target column: ds (string)
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Target Vertex: Map 1
 
   Stage: Stage-0
     Fetch Operator
@@ -4587,21 +4548,18 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+Warning: Map Join MAPJOIN[22][bigTable=?] in task 'Reducer 3' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
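
Two effects of HIVE-13068 run through this file. First, literals folded by CBO are kept as typed constants, so expressions print as (_col0 / 2.0) or (UDFToDouble(_col0) * 2.0) instead of re-wrapping the literal as UDFToDouble(2). Second, in the s.`date` = '2008-04-08' tests the filter pins both join keys to the same literal: the predicate is pushed into both table scans as static pruning (filterExpr: (ds = '2008-04-08')), the now-constant equi-join keys are dropped (empty key lists), and the Dynamic Partitioning Event Operator becomes unnecessary. That is why the new cross-product warnings appear and why the 2008-04-09 partitions vanish from the PREHOOK/POSTHOOK inputs. A hedged repro over the standard srcpart test table (ds string partition column):

    EXPLAIN
    SELECT count(*)
    FROM srcpart
    JOIN (SELECT ds FROM srcpart GROUP BY ds) s ON (srcpart.ds = s.ds)
    WHERE s.ds = '2008-04-08';
    -- With both keys fixed to '2008-04-08', the join runs with no keys
    -- at all, hence the "is a cross product" warning in the output above.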

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/udf_folder_constants.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_folder_constants.q.out b/ql/src/test/results/clientpositive/udf_folder_constants.q.out
index 3e765d9..6ee520f 100644
--- a/ql/src/test/results/clientpositive/udf_folder_constants.q.out
+++ b/ql/src/test/results/clientpositive/udf_folder_constants.q.out
@@ -63,12 +63,12 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: month (type: int)
-                outputColumnNames: _col1
+                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col1 (type: int)
+                  key expressions: _col0 (type: int)
                   sort order: +
-                  Map-reduce partition columns: _col1 (type: int)
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: b
@@ -90,7 +90,7 @@ STAGE PLANS:
           condition map:
                Inner Join 0 to 1
           keys:
-            0 _col1 (type: int)
+            0 _col0 (type: int)
             1 _col0 (type: int)
           Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
           Select Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out b/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out
index 1aa9727..a1543d9 100644
--- a/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out
@@ -89,7 +89,7 @@ POSTHOOK: Input: default@oneline
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@foo
 POSTHOOK: Lineage: foo.a SIMPLE []
-POSTHOOK: Lineage: foo.b EXPRESSION []
+POSTHOOK: Lineage: foo.b SIMPLE []
 PREHOOK: query: drop table foo
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@foo
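
The lineage change above is the same constant handling seen elsewhere in this commit: once unix_timestamp over a literal is folded to a constant, column b no longer derives from any source-column expression, so it is recorded as SIMPLE (with no base columns) rather than EXPRESSION. A hypothetical CTAS of the same shape — the .q script itself is not part of this hunk, and the literal below is illustrative:

    CREATE TABLE foo AS
    SELECT 1 AS a,
           unix_timestamp('2009-03-20 11:30:01') AS b  -- folds to a constant
    FROM oneline;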

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/union_fast_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_fast_stats.q.out b/ql/src/test/results/clientpositive/union_fast_stats.q.out
index f0879af..c3e9664 100644
--- a/ql/src/test/results/clientpositive/union_fast_stats.q.out
+++ b/ql/src/test/results/clientpositive/union_fast_stats.q.out
@@ -55,7 +55,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -83,7 +83,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -99,13 +99,13 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
 (select * from (select * from small_alltypesorc1a) sq1
  union all
@@ -388,7 +388,7 @@ POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
@@ -416,7 +416,7 @@ POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc
 POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -432,13 +432,13 @@ POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesor
 POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE []
 POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
 POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
-POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE []
 PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
 (select * from (select * from small_alltypesorc1a) sq1
  union all

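The cint and ctinyint entries above collapse to SIMPLE [] because the small_alltypesorc4a CTAS filters on cint is null and ctinyint is null: every selected value of those columns is provably NULL, the optimizer substitutes the constant, and the lineage keeps no reference back to alltypesorc. A reduced illustration of the same effect (hypothetical target table name):

    -- cint is NULL on every qualifying row, so it folds to a constant and
    -- the column's lineage is recorded as SIMPLE with no base columns:
    CREATE TABLE only_null_cint AS
    SELECT cint FROM alltypesorc WHERE cint IS NULL;
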
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/union_offcbo.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_offcbo.q.out b/ql/src/test/results/clientpositive/union_offcbo.q.out
index 7eff464..71c3bfc 100644
--- a/ql/src/test/results/clientpositive/union_offcbo.q.out
+++ b/ql/src/test/results/clientpositive/union_offcbo.q.out
@@ -629,7 +629,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: ((CASE WHEN ((_col7 is not null and _col0 is null and (_col3 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col7 is not null and _col0 is null and (_col3 <= '2016-02-05'))) THEN ('RET') WHEN (((_col7 = _col0) and (_col8 <> _col1))) THEN ('A_INS') ELSE ('NA') END <> 'RET') and ((NVL(_col0,-1) <> NVL(_col7,-1)) or (NVL(_col1,-1) <> NVL(_col8,-1)))) (type: boolean)
+            predicate: ((CASE WHEN ((_col7 is not null and _col0 is null and (_col3 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col7 is not null and _col0 is null and (_col3 <= '2016-02-05'))) THEN ('RET') WHEN (((_col7 = _col0) and (_col8 <> _col1))) THEN ('A_INS') ELSE ('NA') END <> 'RET') and (not ((NVL(_col0,-1) = NVL(_col7,-1)) and (NVL(_col1,-1) = NVL(_col8,-1))))) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col2 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col4 (type: string), _col7 (type: string), _col8 (type: string), CASE WHEN ((_col7 is not null and _col0 is null and (_col3 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col7 is not null and _col0 is null and (_col3 <= '2016-02-05'))) THEN ('RET') WHEN (((_col7 = _col0) and (_col8 <> _col1))) THEN ('A_INS') ELSE ('NA') END (type: string)
@@ -684,15 +684,15 @@ STAGE PLANS:
               predicate: ((ts1 = '2015-11-20') and reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(id1)) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: id1 (type: bigint), '2015-11-20' (type: string), sts (type: string), at1 (type: bigint), reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(id1)) (type: string), reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(at1)) (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                expressions: id1 (type: bigint), sts (type: string), at1 (type: bigint), reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(id1)) (type: string), reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(at1)) (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col4 (type: string)
+                  key expressions: _col3 (type: string)
                   sort order: +
-                  Map-reduce partition columns: _col4 (type: string)
+                  Map-reduce partition columns: _col3 (type: string)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  value expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: string), _col3 (type: bigint), _col5 (type: string)
+                  value expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: bigint), _col4 (type: string)
           TableScan
             alias: ttest2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -714,15 +714,15 @@ STAGE PLANS:
           condition map:
                Left Outer Join0 to 1
           keys:
-            0 _col4 (type: string)
+            0 _col3 (type: string)
             1 _col1 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: ((CASE WHEN ((_col7 is not null and _col4 is null and (_col6 <= _col1))) THEN ('DEL') WHEN (((_col7 is null and _col4 is not null) or ((_col7 = _col4) and (_col8 <> _col5)))) THEN ('INS') ELSE ('NA') END <> 'RET') and ((NVL(_col4,-1) <> NVL(_col7,-1)) or (NVL(_col5,-1) <> NVL(_col8,-1)))) (type: boolean)
+            predicate: ((CASE WHEN ((_col6 is not null and _col3 is null and (_col5 <= '2015-11-20'))) THEN ('DEL') WHEN (((_col6 is null and _col3 is not null) or ((_col6 = _col3) and (_col7 <> _col4)))) THEN ('INS') ELSE ('NA') END <> 'RET') and (not ((NVL(_col3,-1) = NVL(_col6,-1)) and (NVL(_col4,-1) = NVL(_col7,-1))))) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              expressions: _col0 (type: bigint), _col2 (type: string), _col3 (type: bigint), '2099-12-31' (type: string), _col4 (type: string), _col5 (type: string), CASE WHEN ((_col7 is not null and _col4 is null and (_col6 <= _col1))) THEN ('DEL') WHEN (((_col7 is null and _col4 is not null) or ((_col7 = _col4) and (_col8 <> _col5)))) THEN ('INS') ELSE ('NA') END (type: string)
+              expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: bigint), '2099-12-31' (type: string), _col3 (type: string), _col4 (type: string), CASE WHEN ((_col6 is not null and _col3 is null and (_col5 <= '2015-11-20'))) THEN ('DEL') WHEN (((_col6 is null and _col3 is not null) or ((_col6 = _col3) and (_col7 <> _col4)))) THEN ('INS') ELSE ('NA') END (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               File Output Operator
@@ -1652,7 +1652,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: ((CASE WHEN ((_col7 is not null and _col0 is null and (_col3 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col7 is not null and _col0 is null and (_col3 <= '2016-02-05'))) THEN ('RET') WHEN (((_col7 = _col0) and (_col8 <> _col1))) THEN ('A_INS') ELSE ('NA') END <> 'RET') and ((NVL(_col0,-1) <> NVL(_col7,-1)) or (NVL(_col1,-1) <> NVL(_col8,-1)))) (type: boolean)
+            predicate: ((CASE WHEN ((_col7 is not null and _col0 is null and (_col3 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col7 is not null and _col0 is null and (_col3 <= '2016-02-05'))) THEN ('RET') WHEN (((_col7 = _col0) and (_col8 <> _col1))) THEN ('A_INS') ELSE ('NA') END <> 'RET') and (not ((NVL(_col0,-1) = NVL(_col7,-1)) and (NVL(_col1,-1) = NVL(_col8,-1))))) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col2 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col4 (type: string), _col7 (type: string), _col8 (type: string), CASE WHEN ((_col7 is not null and _col0 is null and (_col3 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col7 is not null and _col0 is null and (_col3 <= '2016-02-05'))) THEN ('RET') WHEN (((_col7 = _col0) and (_col8 <> _col1))) THEN ('A_INS') ELSE ('NA') END (type: string)
@@ -1707,15 +1707,15 @@ STAGE PLANS:
               predicate: ((ts1 = '2015-11-20') and reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(id1)) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
-                expressions: id1 (type: bigint), '2015-11-20' (type: string), sts (type: string), at1 (type: bigint), reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(id1)) (type: string), reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(at1)) (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                expressions: id1 (type: bigint), sts (type: string), at1 (type: bigint), reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(id1)) (type: string), reflect('org.apache.commons.codec.digest.DigestUtils','sha256Hex',concat(at1)) (type: string)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col4 (type: string)
+                  key expressions: _col3 (type: string)
                   sort order: +
-                  Map-reduce partition columns: _col4 (type: string)
+                  Map-reduce partition columns: _col3 (type: string)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  value expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: string), _col3 (type: bigint), _col5 (type: string)
+                  value expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: bigint), _col4 (type: string)
           TableScan
             alias: ttest2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1737,15 +1737,15 @@ STAGE PLANS:
           condition map:
                Left Outer Join0 to 1
           keys:
-            0 _col4 (type: string)
+            0 _col3 (type: string)
             1 _col1 (type: string)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: ((CASE WHEN ((_col7 is not null and _col4 is null and (_col6 <= _col1))) THEN ('DEL') WHEN (((_col7 is null and _col4 is not null) or ((_col7 = _col4) and (_col8 <> _col5)))) THEN ('INS') ELSE ('NA') END <> 'RET') and ((NVL(_col4,-1) <> NVL(_col7,-1)) or (NVL(_col5,-1) <> NVL(_col8,-1)))) (type: boolean)
+            predicate: ((CASE WHEN ((_col6 is not null and _col3 is null and (_col5 <= '2015-11-20'))) THEN ('DEL') WHEN (((_col6 is null and _col3 is not null) or ((_col6 = _col3) and (_col7 <> _col4)))) THEN ('INS') ELSE ('NA') END <> 'RET') and (not ((NVL(_col3,-1) = NVL(_col6,-1)) and (NVL(_col4,-1) = NVL(_col7,-1))))) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              expressions: _col0 (type: bigint), _col2 (type: string), _col3 (type: bigint), '2099-12-31' (type: string), _col4 (type: string), _col5 (type: string), CASE WHEN ((_col7 is not null and _col4 is null and (_col6 <= _col1))) THEN ('DEL') WHEN (((_col7 is null and _col4 is not null) or ((_col7 = _col4) and (_col8 <> _col5)))) THEN ('INS') ELSE ('NA') END (type: string)
+              expressions: _col0 (type: bigint), _col1 (type: string), _col2 (type: bigint), '2099-12-31' (type: string), _col3 (type: string), _col4 (type: string), CASE WHEN ((_col6 is not null and _col3 is null and (_col5 <= '2015-11-20'))) THEN ('DEL') WHEN (((_col6 is null and _col3 is not null) or ((_col6 = _col3) and (_col7 <> _col4)))) THEN ('INS') ELSE ('NA') END (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               File Output Operator
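
Note on the hunks above and throughout this message: HIVE-13068 stops Hive from re-running the full ConstantPropagate transform on plans that CBO (Calcite) has already optimized, so the golden files now show CBO's own constant handling. The churn goes both ways: some plans drop a redundant literal column and inline the constant at its use site (the '2015-11-20' projection removed above, with the surviving columns renumbered down by one), while others ship a folded literal through the ReduceSink instead of re-deriving it in the reducer (union_view and vector_decimal_2 below). A minimal sketch of the guard's likely shape, assuming the isCboSucceeded() accessor and ConstantPropagateOption.SHORTCUT enum present in Hive around this release; this is exposition, not the verbatim patch:

    // Sketch only: run full constant folding only when CBO did not already
    // optimize the plan; otherwise keep just the cheap short-circuit rewrites.
    if (pctx.getContext().isCboSucceeded()) {
      transformations.add(new ConstantPropagate(ConstantPropagateOption.SHORTCUT));
    } else {
      transformations.add(new ConstantPropagate());
    }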

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/union_remove_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_12.q.out b/ql/src/test/results/clientpositive/union_remove_12.q.out
index 2b42538..5f73c9a 100644
--- a/ql/src/test/results/clientpositive/union_remove_12.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_12.q.out
@@ -87,7 +87,7 @@ STAGE PLANS:
             alias: inputtbl1
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), UDFToLong('1') (type: bigint)
+              expressions: key (type: string), 1 (type: bigint)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
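
Note: 'UDFToLong('1')' becoming '1 (type: bigint)' means the cast of a string constant is now evaluated once at planning time, leaving a typed literal in the plan and no per-row UDF call; union_remove_14.q.out below changes identically. A trivial self-contained illustration of the fold (hypothetical class, exposition only):

    // What the planner precomputes in place of UDFToLong('1').
    public class CastFoldDemo {
      public static void main(String[] args) {
        long folded = Long.parseLong("1"); // the cast, done at compile time
        System.out.println(folded + " (type: bigint)");
      }
    }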

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/union_remove_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_14.q.out b/ql/src/test/results/clientpositive/union_remove_14.q.out
index a754dd4..52dc7c5 100644
--- a/ql/src/test/results/clientpositive/union_remove_14.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_14.q.out
@@ -89,7 +89,7 @@ STAGE PLANS:
             alias: inputtbl1
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: key (type: string), UDFToLong('1') (type: bigint)
+              expressions: key (type: string), 1 (type: bigint)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/union_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_view.q.out b/ql/src/test/results/clientpositive/union_view.q.out
index 530739e..90e240a 100644
--- a/ql/src/test/results/clientpositive/union_view.q.out
+++ b/ql/src/test/results/clientpositive/union_view.q.out
@@ -706,14 +706,14 @@ STAGE PLANS:
                 Union
                   Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col1, _col2
+                    expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col2 (type: string)
                       sort order: +
                       Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
+                      value expressions: _col0 (type: int), _col1 (type: string)
           TableScan
             alias: src_union_2
             filterExpr: ((key = 86) and ds is not null) (type: boolean)
@@ -728,14 +728,14 @@ STAGE PLANS:
                 Union
                   Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col1, _col2
+                    expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col2 (type: string)
                       sort order: +
                       Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
+                      value expressions: _col0 (type: int), _col1 (type: string)
           TableScan
             alias: src_union_3
             filterExpr: ((key = 86) and ds is not null) (type: boolean)
@@ -750,17 +750,17 @@ STAGE PLANS:
                 Union
                   Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col0 (type: string), _col1 (type: string)
-                    outputColumnNames: _col1, _col2
+                    expressions: 86 (type: int), _col0 (type: string), _col1 (type: string)
+                    outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col2 (type: string)
                       sort order: +
                       Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col1 (type: string)
+                      value expressions: _col0 (type: int), _col1 (type: string)
       Reduce Operator Tree:
         Select Operator
-          expressions: 86 (type: int), VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string)
+          expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string)
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 1250 Data size: 13280 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
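
Note: in the three Union branches above, the folded key literal 86 is now projected map-side as _col0 and shipped through the ReduceSink as a value expression, and the reducer reads VALUE._col0 back rather than re-materializing the constant in its Select. The plan stays faithful to CBO's output at the cost of one extra int per shuffled row.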

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
index ba3a0b8..5e166e6 100644
--- a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
+++ b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out
@@ -386,20 +386,18 @@ STAGE PLANS:
               predicate: (f1 = 1) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
+                expressions: 1 (type: int)
+                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                 Union
                   Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: 1 (type: int)
-                    outputColumnNames: _col0
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           TableScan
             alias: union_all_bug_test_2
             Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE
@@ -407,20 +405,18 @@ STAGE PLANS:
               predicate: (f1 = 1) (type: boolean)
               Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
               Select Operator
+                expressions: 1 (type: int)
+                outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
                 Union
                   Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: 1 (type: int)
-                    outputColumnNames: _col0
+                  File Output Operator
+                    compressed: false
                     Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    table:
+                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
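
Note: here the constant projection (1) moves above the Union into each branch's outer Select Operator, which leaves nothing for the old per-branch Select under the Union to do; it collapses away and the File Output Operator moves up one level.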

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_between_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_between_columns.q.out b/ql/src/test/results/clientpositive/vector_between_columns.q.out
index 5faa79b..5143074 100644
--- a/ql/src/test/results/clientpositive/vector_between_columns.q.out
+++ b/ql/src/test/results/clientpositive/vector_between_columns.q.out
@@ -70,7 +70,7 @@ POSTHOOK: Output: default@TINT
 POSTHOOK: Lineage: tint.cint SIMPLE [(tint_txt)tint_txt.FieldSchema(name:cint, type:int, comment:null), ]
 POSTHOOK: Lineage: tint.rnum SIMPLE [(tint_txt)tint_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 tint_txt.rnum	tint_txt.cint
-Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: explain
 select tint.rnum, tsint.rnum, tint.cint, tsint.csint from tint , tsint where tint.cint between tsint.csint and tsint.csint
 PREHOOK: type: QUERY
@@ -145,7 +145,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: select tint.rnum, tsint.rnum, tint.cint, tsint.csint from tint , tsint where tint.cint between tsint.csint and tsint.csint
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tint
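
Note: operator ids such as MAPJOIN[11] come from a per-query counter incremented as operators are created, so eliminating one redundant Select during compilation shifts every later id down by one. The MAPJOIN[11] -> MAPJOIN[10] rename above is purely cosmetic, and the same renumbering explains the warning-line diffs in vector_binary_join_groupby, vector_groupby_mapjoin, vector_join_filters and vector_join_nulls below.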

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
index 2169db9..d9c027a 100644
--- a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
@@ -190,7 +190,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(*))
 FROM hundredorc t1 JOIN hundredorc t2 ON t2.bin = t2.bin
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_coalesce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_coalesce.q.out b/ql/src/test/results/clientpositive/vector_coalesce.q.out
index e126dcb..21a9122 100644
--- a/ql/src/test/results/clientpositive/vector_coalesce.q.out
+++ b/ql/src/test/results/clientpositive/vector_coalesce.q.out
@@ -40,7 +40,7 @@ STAGE PLANS:
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: null (type: double), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: smallint), KEY.reducesinkkey5 (type: string)
+          expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: float), KEY.reducesinkkey4 (type: smallint), KEY.reducesinkkey5 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
           Limit
@@ -124,7 +124,7 @@ STAGE PLANS:
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: null (type: tinyint), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: double)
+          expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: int), KEY.reducesinkkey3 (type: double)
           outputColumnNames: _col0, _col1, _col2, _col3
           Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
           Limit
@@ -206,7 +206,7 @@ STAGE PLANS:
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: null (type: float), null (type: bigint), 0.0 (type: float)
+          expressions: KEY.reducesinkkey0 (type: float), KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey2 (type: float)
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
           Limit
@@ -372,7 +372,7 @@ STAGE PLANS:
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: null (type: float), null (type: bigint), null (type: float)
+          expressions: KEY.reducesinkkey0 (type: float), KEY.reducesinkkey1 (type: bigint), KEY.reducesinkkey0 (type: float)
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 3072 Data size: 660491 Basic stats: COMPLETE Column stats: NONE
           Limit
@@ -443,7 +443,7 @@ STAGE PLANS:
               predicate: cbigint is null (type: boolean)
               Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: null (type: bigint), ctinyint (type: tinyint), COALESCE(null,ctinyint) (type: bigint)
+                expressions: null (type: bigint), ctinyint (type: tinyint), COALESCE(null,ctinyint) (type: tinyint)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                 Limit
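
Note: previously the post-CBO constant propagation treated these sort columns as constants and replaced them in the reducer's Select with folded literals ('null (type: double)', '0.0 (type: float)'), ignoring the shuffled key; the plans above now read the keys back via KEY.reducesinkkey*. The final hunk also keeps COALESCE(null, ctinyint) at its natural tinyint type instead of the previously widened bigint.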

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_date_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_date_1.q.out b/ql/src/test/results/clientpositive/vector_date_1.q.out
index da608bf..a394e0f 100644
--- a/ql/src/test/results/clientpositive/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_date_1.q.out
@@ -617,7 +617,7 @@ STAGE PLANS:
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 2001-01-01 (type: date), VALUE._col0 (type: date)
+          expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date)
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
           File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_decimal_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_2.q.out
index 5e5b36c..892a1b3 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_2.q.out
@@ -914,15 +914,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 3.14 (type: decimal(4,2))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 3.14 (type: decimal(3,2))
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(4,2))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 3.14 (type: decimal(4,2))
+          expressions: VALUE._col0 (type: decimal(4,2))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -966,15 +969,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 3.14 (type: decimal(4,2))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 3.14 (type: decimal(3,2))
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(4,2))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 3.14 (type: decimal(4,2))
+          expressions: VALUE._col0 (type: decimal(4,2))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1018,15 +1024,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 1355944339.1234567 (type: decimal(30,8))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 1355944339.1234567 (type: decimal(17,7))
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(30,8))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 1355944339.1234567 (type: decimal(30,8))
+          expressions: VALUE._col0 (type: decimal(30,8))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1070,15 +1079,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 1 (type: decimal(10,0))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 1 (type: int)
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(10,0))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 1 (type: decimal(10,0))
+          expressions: VALUE._col0 (type: decimal(10,0))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1113,15 +1125,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 1 (type: decimal(10,0))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 1 (type: int)
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(10,0))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 1 (type: decimal(10,0))
+          expressions: VALUE._col0 (type: decimal(10,0))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1165,15 +1180,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 3 (type: decimal(10,0))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 3 (type: int)
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(10,0))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 3 (type: decimal(10,0))
+          expressions: VALUE._col0 (type: decimal(10,0))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1217,15 +1235,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 3 (type: decimal(10,0))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 3 (type: int)
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(10,0))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 3 (type: decimal(10,0))
+          expressions: VALUE._col0 (type: decimal(10,0))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1269,15 +1290,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 3 (type: decimal(10,0))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 3 (type: int)
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(10,0))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 3 (type: decimal(10,0))
+          expressions: VALUE._col0 (type: decimal(10,0))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1321,15 +1345,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 3 (type: decimal(10,0))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 3 (type: int)
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(10,0))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 3 (type: decimal(10,0))
+          expressions: VALUE._col0 (type: decimal(10,0))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1373,15 +1400,18 @@ STAGE PLANS:
             alias: decimal_2
             Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: 1 (type: decimal(20,19))
+              outputColumnNames: _col0
               Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 1 (type: int)
                 sort order: +
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                value expressions: _col0 (type: decimal(20,19))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 1 (type: decimal(20,19))
+          expressions: VALUE._col0 (type: decimal(20,19))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
@@ -1433,7 +1463,7 @@ STAGE PLANS:
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 0.99999999999999999999 (type: decimal(20,20))
+          expressions: KEY.reducesinkkey0 (type: decimal(20,20))
           outputColumnNames: _col0
           Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
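
Note: the vector_decimal_2 hunks all follow one pattern: the ReduceSink key keeps the narrower type the sort was planned with (decimal(3,2), decimal(17,7), int), while the query's declared result type (decimal(4,2), decimal(30,8), decimal(10,0), decimal(20,19)) is now projected map-side and shipped as a value expression, so the reducer selects VALUE._col0 instead of re-creating the literal. vector_decimal_round_2, vector_interval_1 and vector_interval_arithmetic below change the same way.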

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
index 4924bff..5b321c2 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
@@ -431,18 +431,18 @@ STAGE PLANS:
             alias: decimal_tbl_4_orc
             Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9))
-              outputColumnNames: _col0, _col1
+              expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9)), 1809242.315111134 (type: decimal(17,9)), -1809242.315111134 (type: decimal(17,9))
+              outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: decimal(30,9))
                 sort order: +
                 Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col1 (type: decimal(30,9))
+                value expressions: _col1 (type: decimal(30,9)), _col2 (type: decimal(17,9)), _col3 (type: decimal(17,9))
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: decimal(30,9)), VALUE._col0 (type: decimal(30,9)), 1809242.315111134 (type: decimal(17,9)), -1809242.315111134 (type: decimal(17,9))
+          expressions: KEY.reducesinkkey0 (type: decimal(30,9)), VALUE._col0 (type: decimal(30,9)), VALUE._col1 (type: decimal(17,9)), VALUE._col2 (type: decimal(17,9))
           outputColumnNames: _col0, _col1, _col2, _col3
           Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
           File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
index d367b1e..55262de 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_mapjoin.q.out
@@ -1,4 +1,4 @@
-Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[32][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: -- HIVE-12738 -- We are checking if a MapJoin after a GroupBy will work properly.
 explain
 select *
@@ -148,7 +148,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[32][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: select *
 from src
 where not key in

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_interval_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_1.q.out b/ql/src/test/results/clientpositive/vector_interval_1.q.out
index 6845628..379747c 100644
--- a/ql/src/test/results/clientpositive/vector_interval_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_1.q.out
@@ -148,18 +148,18 @@ STAGE PLANS:
             alias: vector_interval_1
             Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: dt (type: date), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month)
-              outputColumnNames: _col0, _col2, _col3, _col5, _col6
+              expressions: dt (type: date), 2-4 (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 + CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), 0-0 (type: interval_year_month), (CAST( str1 AS INTERVAL YEAR TO MONTH) - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month), (1-2 - CAST( str1 AS INTERVAL YEAR TO MONTH)) (type: interval_year_month)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
               Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: date)
                 sort order: +
                 Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col5 (type: interval_year_month), _col6 (type: interval_year_month)
+                value expressions: _col1 (type: interval_year_month), _col2 (type: interval_year_month), _col3 (type: interval_year_month), _col4 (type: interval_year_month), _col5 (type: interval_year_month), _col6 (type: interval_year_month)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: KEY.reducesinkkey0 (type: date), 2-4 (type: interval_year_month), VALUE._col1 (type: interval_year_month), VALUE._col2 (type: interval_year_month), 0-0 (type: interval_year_month), VALUE._col4 (type: interval_year_month), VALUE._col5 (type: interval_year_month)
+          expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: interval_year_month), VALUE._col1 (type: interval_year_month), VALUE._col2 (type: interval_year_month), VALUE._col3 (type: interval_year_month), VALUE._col4 (type: interval_year_month), VALUE._col5 (type: interval_year_month)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
           Statistics: Num rows: 2 Data size: 442 Basic stats: COMPLETE Column stats: NONE
           File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
index cd8111d..bad7e4a 100644
--- a/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_arithmetic.q.out
@@ -480,16 +480,19 @@ STAGE PLANS:
             alias: interval_arithmetic_1
             Statistics: Num rows: 50 Data size: 4800 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
+              expressions: -1-1 (type: interval_year_month)
+              outputColumnNames: _col1
               Statistics: Num rows: 50 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
               Reduce Output Operator
                 key expressions: 5-5 (type: interval_year_month)
                 sort order: +
                 Statistics: Num rows: 50 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
                 TopN Hash Memory Usage: 0.1
+                value expressions: _col1 (type: interval_year_month)
       Execution mode: vectorized
       Reduce Operator Tree:
         Select Operator
-          expressions: 5-5 (type: interval_year_month), -1-1 (type: interval_year_month)
+          expressions: KEY.reducesinkkey0 (type: interval_year_month), VALUE._col0 (type: interval_year_month)
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 50 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
           Limit

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_join_filters.q.out b/ql/src/test/results/clientpositive/vector_join_filters.q.out
index 999fee7..30003d7 100644
--- a/ql/src/test/results/clientpositive/vector_join_filters.q.out
+++ b/ql/src/test/results/clientpositive/vector_join_filters.q.out
@@ -30,7 +30,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
 POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[20][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/vector_join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_join_nulls.q.out b/ql/src/test/results/clientpositive/vector_join_nulls.q.out
index 9011a1f..fdffbb6 100644
--- a/ql/src/test/results/clientpositive/vector_join_nulls.q.out
+++ b/ql/src/test/results/clientpositive/vector_join_nulls.q.out
@@ -30,7 +30,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
 POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
 POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
-Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -40,7 +40,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 13630578
-Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1


[16/66] [abbrv] hive git commit: HIVE-13068: Disable Hive ConstantPropagate optimizer when CBO has optimized the plan II (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by sp...@apache.org.
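
Note: the remainder of this message deletes Hive's forked copy of Calcite's join predicate inference from HiveRelMdPredicates: the getPredicates(Join, RelMetadataQuery) override and its helpers (HiveJoinConditionBasedPredicateInference, EquivalenceFinder, ExprsItr, HiveJoinRexPermuteInputsShuttle) go away together with their now-unused imports, so metadata queries on joins fall through to the upstream RelMdPredicates implementation, while the Project override above the deleted block survives. The inference the deleted code performed is the one its Javadoc describes: given 'R1(x) join R2(y) on x = y' and a left predicate 'x > 7', substitute equivalents to obtain 'y > 7' for the right side. A self-contained toy of that substitution step, with plain strings standing in for the RexNode/Mapping machinery (names hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class JoinInferenceToy {
      public static void main(String[] args) {
        // Equivalence discovered from the join condition "x = y".
        Map<String, String> equivalence = new HashMap<String, String>();
        equivalence.put("x", "y");
        // Predicate pulled up from the left input.
        String leftPredicate = "x > 7";
        // Substitute each column for its equivalent to infer a predicate
        // that can be pushed to the right input.
        for (Map.Entry<String, String> e : equivalence.entrySet()) {
          System.out.println(leftPredicate.replace(e.getKey(), e.getValue())); // y > 7
        }
      }
    }
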
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
index e810747..85e66d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdPredicates.java
@@ -18,44 +18,24 @@
 package org.apache.hadoop.hive.ql.optimizer.calcite.stats;
 
 import java.util.ArrayList;
-import java.util.BitSet;
-import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
 
-import org.apache.calcite.linq4j.Linq4j;
 import org.apache.calcite.linq4j.Ord;
-import org.apache.calcite.linq4j.function.Predicate1;
-import org.apache.calcite.plan.RelOptCluster;
 import org.apache.calcite.plan.RelOptPredicateList;
 import org.apache.calcite.plan.RelOptUtil;
 import org.apache.calcite.rel.RelNode;
-import org.apache.calcite.rel.core.Join;
-import org.apache.calcite.rel.core.JoinRelType;
 import org.apache.calcite.rel.core.Project;
-import org.apache.calcite.rel.core.SemiJoin;
 import org.apache.calcite.rel.metadata.ReflectiveRelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMdPredicates;
 import org.apache.calcite.rel.metadata.RelMetadataProvider;
 import org.apache.calcite.rel.metadata.RelMetadataQuery;
-import org.apache.calcite.rel.type.RelDataType;
-import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
 import org.apache.calcite.rex.RexCall;
 import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
-import org.apache.calcite.rex.RexPermutationShuttle;
 import org.apache.calcite.rex.RexPermuteInputsShuttle;
-import org.apache.calcite.rex.RexShuttle;
-import org.apache.calcite.rex.RexUtil;
-import org.apache.calcite.rex.RexVisitorImpl;
-import org.apache.calcite.sql.SqlKind;
 import org.apache.calcite.sql.fun.SqlStdOperatorTable;
-import org.apache.calcite.util.BitSets;
 import org.apache.calcite.util.BuiltInMethod;
 import org.apache.calcite.util.ImmutableBitSet;
 import org.apache.calcite.util.mapping.Mapping;
@@ -63,13 +43,8 @@ import org.apache.calcite.util.mapping.MappingType;
 import org.apache.calcite.util.mapping.Mappings;
 import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
 
-import com.google.common.base.Function;
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
 
 //TODO: Move this to calcite
@@ -151,511 +126,4 @@ public class HiveRelMdPredicates extends RelMdPredicates {
     return RelOptPredicateList.of(projectPullUpPredicates);
   }
 
-  /** Infers predicates for a {@link org.apache.calcite.rel.core.Join}. */
-  @Override
-  public RelOptPredicateList getPredicates(Join join, RelMetadataQuery mq) {
-    RexBuilder rB = join.getCluster().getRexBuilder();
-    RelNode left = join.getInput(0);
-    RelNode right = join.getInput(1);
-
-    RelOptPredicateList leftInfo = mq.getPulledUpPredicates(left);
-    RelOptPredicateList rightInfo = mq.getPulledUpPredicates(right);
-
-    HiveJoinConditionBasedPredicateInference jI = new HiveJoinConditionBasedPredicateInference(join,
-        RexUtil.composeConjunction(rB, leftInfo.pulledUpPredicates, false),
-        RexUtil.composeConjunction(rB, rightInfo.pulledUpPredicates, false));
-
-    return jI.inferPredicates(false);
-  }
-
-  /**
-   * Utility to infer predicates from one side of the join that apply on the
-   * other side.
-   *
-   * <p>Contract is:<ul>
-   *
-   * <li>initialize with a {@link org.apache.calcite.rel.core.Join} and
-   * optional predicates applicable on its left and right subtrees.
-   *
-   * <li>you can
-   * then ask it for equivalentPredicate(s) given a predicate.
-   *
-   * </ul>
-   *
-   * <p>So for:
-   * <ol>
-   * <li>'<code>R1(x) join R2(y) on x = y</code>' a call for
-   * equivalentPredicates on '<code>x > 7</code>' will return '
-   * <code>[y > 7]</code>'
-   * <li>'<code>R1(x) join R2(y) on x = y join R3(z) on y = z</code>' a call for
-   * equivalentPredicates on the second join '<code>x > 7</code>' will return '
-   * <code>[y > 7, z > 7]</code>'
-   * </ol>
-   */
-  static class HiveJoinConditionBasedPredicateInference {
-    final Join joinRel;
-    final boolean isSemiJoin;
-    final int nSysFields;
-    final int nFieldsLeft;
-    final int nFieldsRight;
-    final ImmutableBitSet leftFieldsBitSet;
-    final ImmutableBitSet rightFieldsBitSet;
-    final ImmutableBitSet allFieldsBitSet;
-    SortedMap<Integer, BitSet> equivalence;
-    final Map<String, ImmutableBitSet> exprFields;
-    final Set<String> allExprsDigests;
-    final Set<String> equalityPredicates;
-    final RexNode leftChildPredicates;
-    final RexNode rightChildPredicates;
-
-    public HiveJoinConditionBasedPredicateInference(Join joinRel,
-            RexNode lPreds, RexNode rPreds) {
-      this(joinRel, joinRel instanceof SemiJoin, lPreds, rPreds);
-    }
-
-    private HiveJoinConditionBasedPredicateInference(Join joinRel, boolean isSemiJoin,
-        RexNode lPreds, RexNode rPreds) {
-      super();
-      this.joinRel = joinRel;
-      this.isSemiJoin = isSemiJoin;
-      nFieldsLeft = joinRel.getLeft().getRowType().getFieldList().size();
-      nFieldsRight = joinRel.getRight().getRowType().getFieldList().size();
-      nSysFields = joinRel.getSystemFieldList().size();
-      leftFieldsBitSet = ImmutableBitSet.range(nSysFields,
-          nSysFields + nFieldsLeft);
-      rightFieldsBitSet = ImmutableBitSet.range(nSysFields + nFieldsLeft,
-          nSysFields + nFieldsLeft + nFieldsRight);
-      allFieldsBitSet = ImmutableBitSet.range(0,
-          nSysFields + nFieldsLeft + nFieldsRight);
-
-      exprFields = Maps.newHashMap();
-      allExprsDigests = new HashSet<String>();
-
-      if (lPreds == null) {
-        leftChildPredicates = null;
-      } else {
-        Mappings.TargetMapping leftMapping = Mappings.createShiftMapping(
-            nSysFields + nFieldsLeft, nSysFields, 0, nFieldsLeft);
-        leftChildPredicates = lPreds.accept(
-            new RexPermuteInputsShuttle(leftMapping, joinRel.getInput(0)));
-
-        for (RexNode r : RelOptUtil.conjunctions(leftChildPredicates)) {
-          exprFields.put(r.toString(), RelOptUtil.InputFinder.bits(r));
-          allExprsDigests.add(r.toString());
-        }
-      }
-      if (rPreds == null) {
-        rightChildPredicates = null;
-      } else {
-        Mappings.TargetMapping rightMapping = Mappings.createShiftMapping(
-            nSysFields + nFieldsLeft + nFieldsRight,
-            nSysFields + nFieldsLeft, 0, nFieldsRight);
-        rightChildPredicates = rPreds.accept(
-            new RexPermuteInputsShuttle(rightMapping, joinRel.getInput(1)));
-
-        for (RexNode r : RelOptUtil.conjunctions(rightChildPredicates)) {
-          exprFields.put(r.toString(), RelOptUtil.InputFinder.bits(r));
-          allExprsDigests.add(r.toString());
-        }
-      }
-
-      equivalence = Maps.newTreeMap();
-      equalityPredicates = new HashSet<String>();
-      for (int i = 0; i < nSysFields + nFieldsLeft + nFieldsRight; i++) {
-        equivalence.put(i, BitSets.of(i));
-      }
-
-      // Only process equivalences found in the join conditions. Processing
-      // Equivalences from the left or right side infer predicates that are
-      // already present in the Tree below the join.
-      RexBuilder rexBuilder = joinRel.getCluster().getRexBuilder();
-      List<RexNode> exprs =
-          RelOptUtil.conjunctions(
-              compose(rexBuilder, ImmutableList.of(joinRel.getCondition())));
-
-      final EquivalenceFinder eF = new EquivalenceFinder();
-      new ArrayList<Void>(Lists.transform(exprs, new Function<RexNode, Void>() {
-        public Void apply(RexNode input) {
-          return input.accept(eF);
-        }
-      }));
-
-      equivalence = BitSets.closure(equivalence);
-    }
-
-    /**
-     * The PullUp Strategy is sound but not complete.
-     * <ol>
-     * <li>We only pullUp inferred predicates for now. Pulling up existing
-     * predicates causes an explosion of duplicates. The existing predicates are
-     * pushed back down as new predicates. Once we have rules to eliminate
-     * duplicate Filter conditions, we should pullUp all predicates.
-     * <li>For Left Outer: we infer new predicates from the left and set them as
-     * applicable on the Right side. No predicates are pulledUp.
-     * <li>Right Outer Joins are handled in an analogous manner.
-     * <li>For Full Outer Joins no predicates are pulledUp or inferred.
-     * </ol>
-     */
-    public RelOptPredicateList inferPredicates(
-        boolean includeEqualityInference) {
-      List<RexNode> inferredPredicates = new ArrayList<RexNode>();
-      Set<String> allExprsDigests = new HashSet<String>(this.allExprsDigests);
-      final JoinRelType joinType = joinRel.getJoinType();
-      switch (joinType) {
-      case INNER:
-      case LEFT:
-        infer(leftChildPredicates, allExprsDigests, inferredPredicates,
-            includeEqualityInference,
-            joinType == JoinRelType.LEFT ? rightFieldsBitSet
-                : allFieldsBitSet);
-        break;
-      }
-      switch (joinType) {
-      case INNER:
-      case RIGHT:
-        infer(rightChildPredicates, allExprsDigests, inferredPredicates,
-            includeEqualityInference,
-            joinType == JoinRelType.RIGHT ? leftFieldsBitSet
-                : allFieldsBitSet);
-        break;
-      }
-
-      Mappings.TargetMapping rightMapping = Mappings.createShiftMapping(
-          nSysFields + nFieldsLeft + nFieldsRight,
-          0, nSysFields + nFieldsLeft, nFieldsRight);
-      final HiveJoinRexPermuteInputsShuttle rightPermute =
-          new HiveJoinRexPermuteInputsShuttle(rightMapping, joinRel);
-      Mappings.TargetMapping leftMapping = Mappings.createShiftMapping(
-          nSysFields + nFieldsLeft, 0, nSysFields, nFieldsLeft);
-      final HiveJoinRexPermuteInputsShuttle leftPermute =
-          new HiveJoinRexPermuteInputsShuttle(leftMapping, joinRel);
-
-      List<RexNode> leftInferredPredicates = new ArrayList<RexNode>();
-      List<RexNode> rightInferredPredicates = new ArrayList<RexNode>();
-
-      for (RexNode iP : inferredPredicates) {
-        ImmutableBitSet iPBitSet = RelOptUtil.InputFinder.bits(iP);
-        if (iPBitSet.isEmpty() && joinType == JoinRelType.INNER) {
-          leftInferredPredicates.add(iP);
-          rightInferredPredicates.add(iP);
-        } else if (iPBitSet.isEmpty() && joinType == JoinRelType.LEFT) {
-          rightInferredPredicates.add(iP);
-        } else if (iPBitSet.isEmpty() && joinType == JoinRelType.RIGHT) {
-          leftInferredPredicates.add(iP);
-        } else if (leftFieldsBitSet.contains(iPBitSet)) {
-          leftInferredPredicates.add(iP.accept(leftPermute));
-        } else if (rightFieldsBitSet.contains(iPBitSet)) {
-          rightInferredPredicates.add(iP.accept(rightPermute));
-        }
-      }
-
-      switch (joinType) {
-      case INNER:
-        Iterable<RexNode> pulledUpPredicates;
-        if (isSemiJoin) {
-          pulledUpPredicates = Iterables.concat(
-                RelOptUtil.conjunctions(leftChildPredicates),
-                leftInferredPredicates);
-        } else {
-          pulledUpPredicates = Iterables.concat(
-                RelOptUtil.conjunctions(leftChildPredicates),
-                RelOptUtil.conjunctions(rightChildPredicates),
-                RelOptUtil.conjunctions(joinRel.getCondition()),
-                inferredPredicates);
-        }
-        return RelOptPredicateList.of(pulledUpPredicates,
-          leftInferredPredicates, rightInferredPredicates);
-      case LEFT:
-        return RelOptPredicateList.of(
-            RelOptUtil.conjunctions(leftChildPredicates),
-            EMPTY_LIST, rightInferredPredicates);
-      case RIGHT:
-        return RelOptPredicateList.of(
-            RelOptUtil.conjunctions(rightChildPredicates),
-            leftInferredPredicates, EMPTY_LIST);
-      default:
-        assert inferredPredicates.size() == 0;
-        return RelOptPredicateList.EMPTY;
-      }
-    }
-
-    public RexNode left() {
-      return leftChildPredicates;
-    }
-
-    public RexNode right() {
-      return rightChildPredicates;
-    }
-
-    private void infer(RexNode predicates, Set<String> allExprsDigests,
-        List<RexNode> inferedPredicates, boolean includeEqualityInference,
-        ImmutableBitSet inferringFields) {
-      for (RexNode r : RelOptUtil.conjunctions(predicates)) {
-        if (r.isAlwaysFalse()) {
-          RexLiteral falseVal =
-                  joinRel.getCluster().getRexBuilder().makeLiteral(false);
-          inferedPredicates.add(falseVal);
-          allExprsDigests.add(falseVal.toString());
-          continue;
-        }
-        if (!includeEqualityInference
-            && equalityPredicates.contains(r.toString())) {
-          continue;
-        }
-        for (Mapping m : mappings(r)) {
-          RexNode tr = r.accept(
-              new RexPermuteInputsShuttle(m, joinRel.getInput(0),
-                  joinRel.getInput(1)));
-          if (inferringFields.contains(RelOptUtil.InputFinder.bits(tr))
-              && !allExprsDigests.contains(tr.toString())
-              && !isAlwaysTrue(tr)) {
-            inferedPredicates.add(tr);
-            allExprsDigests.add(tr.toString());
-          }
-        }
-      }
-    }
-
-    Iterable<Mapping> mappings(final RexNode predicate) {
-      return new Iterable<Mapping>() {
-        public Iterator<Mapping> iterator() {
-          ImmutableBitSet fields = exprFields.get(predicate.toString());
-          if (fields.cardinality() == 0) {
-            return Iterators.emptyIterator();
-          }
-          return new ExprsItr(fields);
-        }
-      };
-    }
-
-    private void equivalent(int p1, int p2) {
-      BitSet b = equivalence.get(p1);
-      b.set(p2);
-
-      b = equivalence.get(p2);
-      b.set(p1);
-    }
-
-    RexNode compose(RexBuilder rexBuilder, Iterable<RexNode> exprs) {
-      exprs = Linq4j.asEnumerable(exprs).where(new Predicate1<RexNode>() {
-        public boolean apply(RexNode expr) {
-          return expr != null;
-        }
-      });
-      return RexUtil.composeConjunction(rexBuilder, exprs, false);
-    }
-
-    /**
-     * Find expressions of the form 'col_x = col_y'.
-     */
-    class EquivalenceFinder extends RexVisitorImpl<Void> {
-      protected EquivalenceFinder() {
-        super(true);
-      }
-
-      @Override public Void visitCall(RexCall call) {
-        if (call.getOperator().getKind() == SqlKind.EQUALS) {
-          int lPos = pos(call.getOperands().get(0));
-          int rPos = pos(call.getOperands().get(1));
-          if (lPos != -1 && rPos != -1) {
-            HiveJoinConditionBasedPredicateInference.this.equivalent(lPos, rPos);
-            HiveJoinConditionBasedPredicateInference.this.equalityPredicates
-                .add(call.toString());
-          }
-        }
-        return null;
-      }
-    }
-
-    /**
-     * Given an expression returns all the possible substitutions.
-     *
-     * <p>For example, for an expression 'a + b + c' and the following
-     * equivalences: <pre>
-     * a : {a, b}
-     * b : {a, b}
-     * c : {c, e}
-     * </pre>
-     *
-     * <p>The following Mappings will be returned:
-     * <pre>
-     * {a->a, b->a, c->c}
-     * {a->a, b->a, c->e}
-     * {a->a, b->b, c->c}
-     * {a->a, b->b, c->e}
-     * {a->b, b->a, c->c}
-     * {a->b, b->a, c->e}
-     * {a->b, b->b, c->c}
-     * {a->b, b->b, c->e}
-     * </pre>
-     *
-     * <p>which imply the following inferences:
-     * <pre>
-     * a + a + c
-     * a + a + e
-     * a + b + c
-     * a + b + e
-     * b + a + c
-     * b + a + e
-     * b + b + c
-     * b + b + e
-     * </pre>
-     */
-    class ExprsItr implements Iterator<Mapping> {
-      final int[] columns;
-      final BitSet[] columnSets;
-      final int[] iterationIdx;
-      Mapping nextMapping;
-      boolean firstCall;
-
-      ExprsItr(ImmutableBitSet fields) {
-        nextMapping = null;
-        columns = new int[fields.cardinality()];
-        columnSets = new BitSet[fields.cardinality()];
-        iterationIdx = new int[fields.cardinality()];
-        for (int j = 0, i = fields.nextSetBit(0); i >= 0; i = fields
-            .nextSetBit(i + 1), j++) {
-          columns[j] = i;
-          columnSets[j] = equivalence.get(i);
-          iterationIdx[j] = 0;
-        }
-        firstCall = true;
-      }
-
-      public boolean hasNext() {
-        if (firstCall) {
-          initializeMapping();
-          firstCall = false;
-        } else {
-          computeNextMapping(iterationIdx.length - 1);
-        }
-        return nextMapping != null;
-      }
-
-      public Mapping next() {
-        return nextMapping;
-      }
-
-      public void remove() {
-        throw new UnsupportedOperationException();
-      }
-
-      private void computeNextMapping(int level) {
-        int t = columnSets[level].nextSetBit(iterationIdx[level]);
-        if (t < 0) {
-          if (level == 0) {
-            nextMapping = null;
-          } else {
-            iterationIdx[level] = 0;
-            computeNextMapping(level - 1);
-          }
-        } else {
-          nextMapping.set(columns[level], t);
-          iterationIdx[level] = t + 1;
-        }
-      }
-
-      private void initializeMapping() {
-        nextMapping = Mappings.create(MappingType.PARTIAL_FUNCTION,
-            nSysFields + nFieldsLeft + nFieldsRight,
-            nSysFields + nFieldsLeft + nFieldsRight);
-        for (int i = 0; i < columnSets.length; i++) {
-          BitSet c = columnSets[i];
-          int t = c.nextSetBit(iterationIdx[i]);
-          if (t < 0) {
-            nextMapping = null;
-            return;
-          }
-          nextMapping.set(columns[i], t);
-          iterationIdx[i] = t + 1;
-        }
-      }
-    }
-
-    private int pos(RexNode expr) {
-      if (expr instanceof RexInputRef) {
-        return ((RexInputRef) expr).getIndex();
-      }
-      return -1;
-    }
-
-    private boolean isAlwaysTrue(RexNode predicate) {
-      if (predicate instanceof RexCall) {
-        RexCall c = (RexCall) predicate;
-        if (c.getOperator().getKind() == SqlKind.EQUALS) {
-          int lPos = pos(c.getOperands().get(0));
-          int rPos = pos(c.getOperands().get(1));
-          return lPos != -1 && lPos == rPos;
-        }
-      }
-      return predicate.isAlwaysTrue();
-    }
-  }
-
-  /**
-   * Shuttle which applies a permutation to its input fields.
-   *
-   * @see RexPermutationShuttle
-   * @see RexUtil#apply(org.apache.calcite.util.mapping.Mappings.TargetMapping, RexNode)
-   */
-  public static class HiveJoinRexPermuteInputsShuttle extends RexShuttle {
-    //~ Instance fields --------------------------------------------------------
-
-    private final Mappings.TargetMapping mapping;
-    private final ImmutableList<RelDataTypeField> fields;
-    private final RelOptCluster cluster;
-    private final RelDataType rType;
-
-    //~ Constructors -----------------------------------------------------------
-
-    private HiveJoinRexPermuteInputsShuttle(
-        Mappings.TargetMapping mapping,
-        RelNode input) {
-      this.mapping = mapping;
-      this.cluster = input.getCluster();
-      this.rType = input.getRowType();
-      this.fields = ImmutableList.copyOf(rType.getFieldList());
-    }
-
-    //~ Methods ----------------------------------------------------------------
-
-    private static ImmutableList<RelDataTypeField> fields(RelNode[] inputs) {
-      final ImmutableList.Builder<RelDataTypeField> fields =
-          ImmutableList.builder();
-      for (RelNode input : inputs) {
-        fields.addAll(input.getRowType().getFieldList());
-      }
-      return fields.build();
-    }
-
-    @Override public RexNode visitInputRef(RexInputRef local) {
-      final int index = local.getIndex();
-      int target = mapping.getTarget(index);
-      return new RexInputRef(
-          target,
-          fields.get(index).getType());
-    }
-
-    @Override public RexNode visitCall(RexCall call) {
-      if (call.getOperator() == RexBuilder.GET_OPERATOR) {
-        final String name =
-            (String) ((RexLiteral) call.getOperands().get(1)).getValue2();
-        final int i = lookup(fields, name);
-        if (i >= 0) {
-          return RexInputRef.of(i, fields);
-        }
-      }
-      return HiveCalciteUtil.getTypeSafePred(cluster, super.visitCall(call), rType);
-    }
-
-    private static int lookup(List<RelDataTypeField> fields, String name) {
-      for (int i = 0; i < fields.size(); i++) {
-        final RelDataTypeField field = fields.get(i);
-        if (field.getName().equals(name)) {
-          return i;
-        }
-      }
-      return -1;
-    }
-  }
 }

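[Editor's note on the removal above: ExprsItr in HiveJoinConditionBasedPredicateInference walks the Cartesian product of the equivalence classes in odometer fashion, advancing the rightmost position first and carrying left on overflow, which is exactly how the javadoc example arrives at its eight mappings. A minimal standalone sketch of that enumeration, with illustrative names that belong to neither Hive nor Calcite:]

    import java.util.ArrayList;
    import java.util.List;

    public class OdometerDemo {
      // Enumerate one choice per position; for the javadoc example
      // a -> {a, b}, b -> {a, b}, c -> {c, e} this yields 2 * 2 * 2 = 8 mappings.
      public static List<int[]> allMappings(List<int[]> choicesPerPosition) {
        List<int[]> result = new ArrayList<>();
        int n = choicesPerPosition.size();
        for (int[] choices : choicesPerPosition) {
          if (choices.length == 0) {
            return result;             // an empty equivalence class: no mappings
          }
        }
        int[] idx = new int[n];        // odometer digits, all start at zero
        while (true) {
          int[] mapping = new int[n];
          for (int i = 0; i < n; i++) {
            mapping[i] = choicesPerPosition.get(i)[idx[i]];
          }
          result.add(mapping);
          int level = n - 1;           // advance the rightmost digit first
          while (level >= 0 && ++idx[level] == choicesPerPosition.get(level).length) {
            idx[level--] = 0;          // overflow: reset this digit, carry left
          }
          if (level < 0) {
            return result;             // the odometer wrapped around: done
          }
        }
      }
    }
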
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
index de7e2f8..353d8db 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java
@@ -45,6 +45,7 @@ import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexOver;
+import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.rex.RexWindow;
 import org.apache.calcite.rex.RexWindowBound;
@@ -189,6 +190,10 @@ public class ASTConverter {
       int i = 0;
 
       for (RexNode r : select.getChildExps()) {
+        if (RexUtil.isNull(r) && r.getType().getSqlTypeName() != SqlTypeName.NULL) {
+          // It is a NULL value with a different type; introduce a CAST to keep the type
+          r = select.getCluster().getRexBuilder().makeAbstractCast(r.getType(), r);
+        }
         ASTNode expr = r.accept(new RexVisitor(schema, r instanceof RexLiteral));
         String alias = select.getRowType().getFieldNames().get(i++);
         ASTNode selectExpr = ASTBuilder.selectExpr(expr, alias);

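[Editor's note: the guard added above keeps a typed NULL from degrading to a plain NULL token during the Rex-to-AST translation, where its type would otherwise be lost. A minimal sketch of the same wrap, assuming a Calcite RexBuilder is in scope; the helper name is illustrative, the RexUtil and RexBuilder calls are the ones the patch itself uses:]

    import org.apache.calcite.rex.RexBuilder;
    import org.apache.calcite.rex.RexNode;
    import org.apache.calcite.rex.RexUtil;
    import org.apache.calcite.sql.type.SqlTypeName;

    public class NullCastSketch {
      // Wrap a typed NULL in CAST(NULL AS <type>) so that re-parsing the
      // generated AST reproduces the original type instead of plain NULL.
      static RexNode keepNullType(RexBuilder rexBuilder, RexNode r) {
        if (RexUtil.isNull(r) && r.getType().getSqlTypeName() != SqlTypeName.NULL) {
          return rexBuilder.makeAbstractCast(r.getType(), r);
        }
        return r;
      }
    }
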
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
index e51b6c4..2e498cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
@@ -36,6 +36,7 @@ import org.apache.calcite.rex.RexInputRef;
 import org.apache.calcite.rex.RexLiteral;
 import org.apache.calcite.rex.RexNode;
 import org.apache.calcite.rex.RexOver;
+import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.rex.RexWindow;
 import org.apache.calcite.rex.RexWindowBound;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
 import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.Schema;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
@@ -79,6 +81,7 @@ import com.google.common.collect.ImmutableSet;
  */
 public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
 
+  private final boolean            foldExpr;
   private final String             tabAlias;
   private final RelDataType        inputRowType;
   private final ImmutableSet<Integer>       inputVCols;
@@ -89,16 +92,28 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
 
   public ExprNodeConverter(String tabAlias, RelDataType inputRowType,
       Set<Integer> vCols, RelDataTypeFactory dTFactory) {
-    this(tabAlias, null, inputRowType, null, vCols, dTFactory);
+    this(tabAlias, null, inputRowType, null, vCols, dTFactory, false);
+  }
+
+  public ExprNodeConverter(String tabAlias, RelDataType inputRowType,
+      Set<Integer> vCols, RelDataTypeFactory dTFactory, boolean foldExpr) {
+    this(tabAlias, null, inputRowType, null, vCols, dTFactory, foldExpr);
   }
 
   public ExprNodeConverter(String tabAlias, String columnAlias, RelDataType inputRowType,
           RelDataType outputRowType, Set<Integer> inputVCols, RelDataTypeFactory dTFactory) {
+    this(tabAlias, columnAlias, inputRowType, outputRowType, inputVCols, dTFactory, false);
+  }
+
+  public ExprNodeConverter(String tabAlias, String columnAlias, RelDataType inputRowType,
+          RelDataType outputRowType, Set<Integer> inputVCols, RelDataTypeFactory dTFactory,
+          boolean foldExpr) {
     super(true);
     this.tabAlias = tabAlias;
     this.inputRowType = inputRowType;
     this.inputVCols = ImmutableSet.copyOf(inputVCols);
     this.dTFactory = dTFactory;
+    this.foldExpr = foldExpr;
   }
 
   public List<WindowFunctionSpec> getWindowFunctionSpec() {
@@ -117,7 +132,7 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
    */
   @Override
   public ExprNodeDesc visitCall(RexCall call) {
-    ExprNodeGenericFuncDesc gfDesc = null;
+    ExprNodeDesc gfDesc = null;
 
     if (!deep) {
       return null;
@@ -149,6 +164,15 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
         throw new RuntimeException(e);
       }
     }
+
+    // Try to fold the call if it is a constant expression
+    if (foldExpr && RexUtil.isConstant(call)) {
+      ExprNodeDesc constantExpr = ConstantPropagateProcFactory.foldExpr((ExprNodeGenericFuncDesc)gfDesc);
+      if (constantExpr != null) {
+        gfDesc = constantExpr;
+      }
+    }
+
     return gfDesc;
   }
 

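[Editor's note: a hypothetical call site for the new flag; only the ExprNodeConverter constructor is from the patch, the method and variable names are illustrative. With foldExpr set, a deterministic call whose operands are all constants is evaluated during translation, so something like UPPER('abc') comes back as a constant descriptor rather than a GenericUDF wrapper:]

    import java.util.HashSet;

    import org.apache.calcite.plan.RelOptCluster;
    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rex.RexNode;
    import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter;
    import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;

    public class FoldingTranslationSketch {
      static ExprNodeDesc translateFolding(RexNode rex, RelDataType inputRowType,
          RelOptCluster cluster) {
        ExprNodeConverter converter = new ExprNodeConverter("t", inputRowType,
            new HashSet<Integer>(), cluster.getTypeFactory(), /* foldExpr */ true);
        return rex.accept(converter);   // constant subtrees fold on the way out
      }
    }
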
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
index 7fbf8cd..3ecbbb1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveGBOpConvUtil.java
@@ -159,7 +159,8 @@ public class HiveGBOpConvUtil {
     // 1. Collect GB Keys
     RelNode aggInputRel = aggRel.getInput();
     ExprNodeConverter exprConv = new ExprNodeConverter(inputOpAf.tabAlias,
-        aggInputRel.getRowType(), new HashSet<Integer>(), aggRel.getCluster().getTypeFactory());
+        aggInputRel.getRowType(), new HashSet<Integer>(), aggRel.getCluster().getTypeFactory(),
+        true);
 
     ExprNodeDesc tmpExprNodeDesc;
     for (int i : aggRel.getGroupSet()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
index 1307808..aef5baa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverter.java
@@ -278,7 +278,8 @@ public class HiveOpConverter {
     for (int pos = 0; pos < projectRel.getChildExps().size(); pos++) {
       ExprNodeConverter converter = new ExprNodeConverter(inputOpAf.tabAlias, projectRel
           .getRowType().getFieldNames().get(pos), projectRel.getInput().getRowType(),
-          projectRel.getRowType(), inputOpAf.vcolsInCalcite, projectRel.getCluster().getTypeFactory());
+          projectRel.getRowType(), inputOpAf.vcolsInCalcite, projectRel.getCluster().getTypeFactory(),
+          true);
       ExprNodeDesc exprCol = projectRel.getChildExps().get(pos).accept(converter);
       colExprMap.put(exprNames.get(pos), exprCol);
       exprCols.add(exprCol);
@@ -520,7 +521,7 @@ public class HiveOpConverter {
 
     ExprNodeDesc filCondExpr = filterRel.getCondition().accept(
         new ExprNodeConverter(inputOpAf.tabAlias, filterRel.getInput().getRowType(), inputOpAf.vcolsInCalcite,
-            filterRel.getCluster().getTypeFactory()));
+            filterRel.getCluster().getTypeFactory(), true));
     FilterDesc filDesc = new FilterDesc(filCondExpr, false);
     ArrayList<ColumnInfo> cinfoLst = createColInfos(inputOpAf.inputs.get(0));
     FilterOperator filOp = (FilterOperator) OperatorFactory.getAndMakeChild(filDesc,
@@ -1164,7 +1165,7 @@ public class HiveOpConverter {
   private static ExprNodeDesc convertToExprNode(RexNode rn, RelNode inputRel, String tabAlias,
           Set<Integer> vcolsInCalcite) {
     return rn.accept(new ExprNodeConverter(tabAlias, inputRel.getRowType(), vcolsInCalcite,
-        inputRel.getCluster().getTypeFactory()));
+        inputRel.getCluster().getTypeFactory(), true));
   }
 
   private static ArrayList<ColumnInfo> createColInfos(Operator<?> input) {

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
index 368264c..b5f4ca3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/HiveOpConverterPostProc.java
@@ -115,7 +115,7 @@ public class HiveOpConverterPostProc extends Transform {
             "In return path join annotate rule, we find " + aliases == null ? null : aliases
                 .size() + " aliases for " + joinOp.toString());
       }
-      final String joinOpAlias = aliases.iterator().next();;
+      final String joinOpAlias = aliases.iterator().next();
       aliasToOpInfo.put(joinOpAlias, joinOp);
 
       // 3. Populate other data structures

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
index ee4f4ea..479070b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java
@@ -74,7 +74,9 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDecimal;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUnixTimeStamp;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUnixTimeStamp;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen;
 import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
@@ -157,7 +159,7 @@ public class RexNodeConverter {
     }
   }
 
-  private RexNode convert(final ExprNodeGenericFuncDesc func) throws SemanticException {
+  private RexNode convert(ExprNodeGenericFuncDesc func) throws SemanticException {
     ExprNodeDesc tmpExprNode;
     RexNode tmpRN;
 
@@ -174,6 +176,8 @@ public class RexNodeConverter {
             ((PrimitiveTypeInfo) func.getTypeInfo()).getPrimitiveCategory())));
     boolean isCompare = !isNumeric && tgtUdf instanceof GenericUDFBaseCompare;
     boolean isWhenCase = tgtUdf instanceof GenericUDFWhen || tgtUdf instanceof GenericUDFCase;
+    boolean isTransformableTimeStamp = func.getGenericUDF() instanceof GenericUDFUnixTimeStamp &&
+            func.getChildren().size() != 0;
 
     if (isNumeric) {
       tgtDT = func.getTypeInfo();
@@ -189,6 +193,9 @@ public class RexNodeConverter {
       if (checkForStatefulFunctions(func.getChildren())) {
         throw new SemanticException("Stateful expressions cannot be used inside of CASE");
       }
+    } else if (isTransformableTimeStamp) {
+      // unix_timestamp(args) -> to_unix_timestamp(args)
+      func = ExprNodeGenericFuncDesc.newInstance(new GenericUDFToUnixTimeStamp(), func.getChildren());
     }
 
     for (ExprNodeDesc childExpr : func.getChildren()) {
@@ -460,14 +467,16 @@ public class RexNodeConverter {
       }
       break;
     case FLOAT:
-      calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Float) value), calciteDataType);
+      calciteLiteral = rexBuilder.makeApproxLiteral(
+              new BigDecimal(Float.toString((Float)value)), calciteDataType);
       break;
     case DOUBLE:
       // TODO: The best solution is to support NaN in expression reduction.
       if (Double.isNaN((Double) value)) {
         throw new CalciteSemanticException("NaN", UnsupportedFeature.Invalid_decimal);
       }
-      calciteLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), calciteDataType);
+      calciteLiteral = rexBuilder.makeApproxLiteral(
+              new BigDecimal(Double.toString((Double)value)), calciteDataType);
       break;
     case CHAR:
       if (value instanceof HiveChar) {

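[Editor's note: two separate fixes land in this file. First, unix_timestamp(args) is rewritten to to_unix_timestamp(args); the guard on getChildren().size() != 0 leaves the zero-argument form alone, since unix_timestamp() returns the current time and is not a candidate for this rewrite. Second, float and double literals now pass through toString() before reaching BigDecimal: the BigDecimal(double) constructor expands the value's exact binary representation, while the string path keeps the shortest decimal that round-trips. A self-contained demonstration of the difference:]

    import java.math.BigDecimal;

    public class LiteralConversionDemo {
      public static void main(String[] args) {
        // The double constructor exposes the literal's exact binary expansion:
        System.out.println(new BigDecimal(0.1));
        // -> 0.1000000000000000055511151231257827021181583404541015625
        // Routing through toString() preserves the value as the user wrote it:
        System.out.println(new BigDecimal(Double.toString(0.1)));
        // -> 0.1
      }
    }
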
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 0b76bff..8b08ae7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -37,8 +37,6 @@ import org.apache.calcite.sql.type.SqlReturnTypeInference;
 import org.apache.calcite.sql.type.SqlTypeFamily;
 import org.apache.calcite.util.Util;
 import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.FunctionInfo;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
@@ -65,6 +63,8 @@ import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
@@ -204,6 +204,7 @@ public class SqlFunctionConverter {
         case BETWEEN:
         case ROW:
         case IS_NOT_NULL:
+        case IS_NULL:
         case CASE:
           node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION");
           node.addChild((ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text));
@@ -322,13 +323,15 @@ public class SqlFunctionConverter {
       registerFunction(">", SqlStdOperatorTable.GREATER_THAN, hToken(HiveParser.GREATERTHAN, ">"));
       registerFunction(">=", SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
           hToken(HiveParser.GREATERTHANOREQUALTO, ">="));
-      registerFunction("!", SqlStdOperatorTable.NOT, hToken(HiveParser.KW_NOT, "not"));
+      registerFunction("not", SqlStdOperatorTable.NOT, hToken(HiveParser.KW_NOT, "not"));
+      registerDuplicateFunction("!", SqlStdOperatorTable.NOT, hToken(HiveParser.KW_NOT, "not"));
       registerFunction("<>", SqlStdOperatorTable.NOT_EQUALS, hToken(HiveParser.NOTEQUAL, "<>"));
       registerDuplicateFunction("!=", SqlStdOperatorTable.NOT_EQUALS, hToken(HiveParser.NOTEQUAL, "<>"));
       registerFunction("in", HiveIn.INSTANCE, hToken(HiveParser.Identifier, "in"));
       registerFunction("between", HiveBetween.INSTANCE, hToken(HiveParser.Identifier, "between"));
       registerFunction("struct", SqlStdOperatorTable.ROW, hToken(HiveParser.Identifier, "struct"));
       registerFunction("isnotnull", SqlStdOperatorTable.IS_NOT_NULL, hToken(HiveParser.TOK_ISNOTNULL, "TOK_ISNOTNULL"));
+      registerFunction("isnull", SqlStdOperatorTable.IS_NULL, hToken(HiveParser.TOK_ISNULL, "TOK_ISNULL"));
       registerFunction("when", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
       registerDuplicateFunction("case", SqlStdOperatorTable.CASE, hToken(HiveParser.Identifier, "when"));
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
index 2825f77..ba41518 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TypeConverter.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.optimizer.calcite.translator;
 
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
@@ -28,9 +29,11 @@ import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeFactory;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
-import org.apache.calcite.sql.parser.SqlParserPos;
+import org.apache.calcite.sql.SqlCollation;
 import org.apache.calcite.sql.SqlIntervalQualifier;
+import org.apache.calcite.sql.parser.SqlParserPos;
 import org.apache.calcite.sql.type.SqlTypeName;
+import org.apache.calcite.util.ConversionUtil;
 import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -57,6 +60,7 @@ import com.google.common.collect.ImmutableMap.Builder;
 import com.google.common.collect.Lists;
 
 public class TypeConverter {
+
   private static final Map<String, HiveToken> calciteToHiveTypeNameMap;
 
   // TODO: Handling of char[], varchar[], string...
@@ -162,7 +166,9 @@ public class TypeConverter {
       convertedType = dtFactory.createSqlType(SqlTypeName.DOUBLE);
       break;
     case STRING:
-      convertedType = dtFactory.createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE);
+      convertedType = dtFactory.createTypeWithCharsetAndCollation(
+              dtFactory.createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE),
+              Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME), SqlCollation.IMPLICIT);
       break;
     case DATE:
       convertedType = dtFactory.createSqlType(SqlTypeName.DATE);
@@ -187,12 +193,14 @@ public class TypeConverter {
           .createSqlType(SqlTypeName.DECIMAL, dtInf.precision(), dtInf.scale());
       break;
     case VARCHAR:
-      convertedType = dtFactory.createSqlType(SqlTypeName.VARCHAR,
-          ((BaseCharTypeInfo) type).getLength());
+      convertedType = dtFactory.createTypeWithCharsetAndCollation(
+              dtFactory.createSqlType(SqlTypeName.VARCHAR, ((BaseCharTypeInfo) type).getLength()),
+              Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME), SqlCollation.IMPLICIT);
       break;
     case CHAR:
-      convertedType = dtFactory.createSqlType(SqlTypeName.CHAR,
-          ((BaseCharTypeInfo) type).getLength());
+      convertedType = dtFactory.createTypeWithCharsetAndCollation(
+              dtFactory.createSqlType(SqlTypeName.CHAR, ((BaseCharTypeInfo) type).getLength()),
+              Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME), SqlCollation.IMPLICIT);
       break;
     case UNKNOWN:
       convertedType = dtFactory.createSqlType(SqlTypeName.OTHER);

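[Editor's note: STRING, VARCHAR, and CHAR conversions now attach an explicit UTF-16 charset and IMPLICIT collation instead of inheriting Calcite's default character set, so character data survives literal handling during expression reduction. The change reduces to one pattern, shown here for the unbounded STRING case; the helper name is illustrative, the calls are the ones used above:]

    import java.nio.charset.Charset;

    import org.apache.calcite.rel.type.RelDataType;
    import org.apache.calcite.rel.type.RelDataTypeFactory;
    import org.apache.calcite.sql.SqlCollation;
    import org.apache.calcite.sql.type.SqlTypeName;
    import org.apache.calcite.util.ConversionUtil;

    public class HiveStringTypeSketch {
      static RelDataType hiveStringType(RelDataTypeFactory dtFactory) {
        // VARCHAR(MAX) tagged with the JVM's native UTF-16 charset:
        return dtFactory.createTypeWithCharsetAndCollation(
            dtFactory.createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE),
            Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME),
            SqlCollation.IMPLICIT);
      }
    }
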
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
index 9911179..f9388e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/pcr/PcrExprProcFactory.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartExprEvalUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
@@ -474,7 +475,20 @@ public final class PcrExprProcFactory {
               new ExprNodeConstantDesc(fd.getTypeInfo(), result));
         }
 
-        return new NodeInfoWrapper(WalkState.CONSTANT, null, getOutExpr(fd, nodeOutputs));
+        // Try to fold; otherwise, return the expression itself
+        final ExprNodeGenericFuncDesc desc = getOutExpr(fd, nodeOutputs);
+        final ExprNodeDesc foldedDesc = ConstantPropagateProcFactory.foldExpr(desc);
+        if (foldedDesc != null && foldedDesc instanceof ExprNodeConstantDesc) {
+          ExprNodeConstantDesc constant = (ExprNodeConstantDesc) foldedDesc;
+          if (Boolean.TRUE.equals(constant.getValue())) {
+            return new NodeInfoWrapper(WalkState.TRUE, null, constant);
+          } else if (Boolean.FALSE.equals(constant.getValue())) {
+            return new NodeInfoWrapper(WalkState.FALSE, null, constant);
+          } else {
+            return new NodeInfoWrapper(WalkState.CONSTANT, null, constant);
+          }
+        }
+        return new NodeInfoWrapper(WalkState.CONSTANT, null, desc);
       }
     }
   };

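[Editor's note: the partition-condition walker now folds the rebuilt expression and, when a boolean constant emerges, promotes it to a definite TRUE or FALSE walk state so whole branches can be pruned. Restated as a hypothetical standalone helper; WalkState, NodeInfoWrapper, and the descriptor classes are Hive's, the method itself is illustrative:]

    static NodeInfoWrapper classify(ExprNodeGenericFuncDesc desc) {
      ExprNodeDesc folded = ConstantPropagateProcFactory.foldExpr(desc);
      if (folded instanceof ExprNodeConstantDesc) {
        ExprNodeConstantDesc constant = (ExprNodeConstantDesc) folded;
        if (Boolean.TRUE.equals(constant.getValue())) {
          return new NodeInfoWrapper(WalkState.TRUE, null, constant);    // branch always holds
        }
        if (Boolean.FALSE.equals(constant.getValue())) {
          return new NodeInfoWrapper(WalkState.FALSE, null, constant);   // branch never holds
        }
        return new NodeInfoWrapper(WalkState.CONSTANT, null, constant);  // non-boolean constant
      }
      return new NodeInfoWrapper(WalkState.CONSTANT, null, desc);        // could not fold
    }
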
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index de6a053..49e65e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -140,6 +140,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateJoinTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateProjectMergeRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregatePullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExpandDistinctAggregatesRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterAggregateTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterJoinRule;
@@ -153,21 +154,22 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinCommuteRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinProjectTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinPushTransitivePredicatesRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveJoinToMultiJoinRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortLimitPullUpConstantsRule;
-import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionPullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePartitionPruneRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePointLookupOptimizerRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HivePreFilteringRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectFilterPullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveProjectSortTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveReduceExpressionsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRelFieldTrimmer;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveRulesRegistry;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortJoinReduceRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortLimitPullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortProjectTransposeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortRemoveRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortUnionReduceRule;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionPullUpConstantsRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveWindowingFixRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter;
 import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverter;
@@ -1154,6 +1156,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       rules.add(HiveFilterJoinRule.FILTER_ON_JOIN);
       rules.add(new HiveFilterAggregateTransposeRule(Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, Aggregate.class));
       rules.add(new FilterMergeRule(HiveRelFactories.HIVE_FILTER_FACTORY));
+      rules.add(HiveProjectFilterPullUpConstantsRule.INSTANCE);
       rules.add(HiveReduceExpressionsRule.PROJECT_INSTANCE);
       rules.add(HiveReduceExpressionsRule.FILTER_INSTANCE);
       rules.add(HiveReduceExpressionsRule.JOIN_INSTANCE);
@@ -1168,6 +1171,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       rules.add(HiveSortMergeRule.INSTANCE);
       rules.add(HiveSortLimitPullUpConstantsRule.INSTANCE);
       rules.add(HiveUnionPullUpConstantsRule.INSTANCE);
+      rules.add(HiveAggregatePullUpConstantsRule.INSTANCE);
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
       basePlan = hepPlan(basePlan, true, mdProvider, executorProvider, HepMatchOrder.BOTTOM_UP,
               rules.toArray(new RelOptRule[rules.size()]));
@@ -1217,10 +1221,10 @@ public class CalcitePlanner extends SemanticAnalyzer {
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Projection Pruning");
 
-      // 8. Merge Project-Project if possible
+      // 8. Merge, remove and reduce Project if possible
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
-      basePlan = hepPlan(basePlan, false, mdProvider, null, new ProjectMergeRule(true,
-          HiveRelFactories.HIVE_PROJECT_FACTORY));
+      basePlan = hepPlan(basePlan, false, mdProvider, null,
+           HiveProjectMergeRule.INSTANCE, ProjectRemoveRule.INSTANCE);
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Merge Project-Project");
 
@@ -1229,9 +1233,11 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // storage (incase there are filters on non partition cols). This only
       // matches FIL-PROJ-TS
       perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER);
-      basePlan = hepPlan(basePlan, true, mdProvider, null, new HiveFilterProjectTSTransposeRule(
-          Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, HiveProject.class,
-          HiveRelFactories.HIVE_PROJECT_FACTORY, HiveTableScan.class));
+      basePlan = hepPlan(basePlan, true, mdProvider, null,
+          new HiveFilterProjectTSTransposeRule(
+              Filter.class, HiveRelFactories.HIVE_FILTER_FACTORY, HiveProject.class,
+              HiveRelFactories.HIVE_PROJECT_FACTORY, HiveTableScan.class),
+          HiveProjectFilterPullUpConstantsRule.INSTANCE);
       perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER,
         "Calcite: Prejoin ordering transformation, Rerun PPD");
 

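[Editor's note: both new pull-up rules join the pre-join-ordering HEP pass, which the surrounding calls run BOTTOM_UP over the rule set. hepPlan itself is not part of this diff; the following is a generic Calcite sketch of such a pass, assuming only a program, planner, and root need wiring (Hive's real helper also threads metadata and executor providers through):]

    import org.apache.calcite.plan.RelOptRule;
    import org.apache.calcite.plan.hep.HepMatchOrder;
    import org.apache.calcite.plan.hep.HepPlanner;
    import org.apache.calcite.plan.hep.HepProgramBuilder;
    import org.apache.calcite.rel.RelNode;

    public class HepPassSketch {
      static RelNode run(RelNode basePlan, RelOptRule... rules) {
        HepProgramBuilder program = new HepProgramBuilder();
        program.addMatchOrder(HepMatchOrder.BOTTOM_UP);  // match leaves first
        for (RelOptRule rule : rules) {
          program.addRuleInstance(rule);
        }
        HepPlanner planner = new HepPlanner(program.build());
        planner.setRoot(basePlan);
        return planner.findBestExp();                    // apply until fixpoint
      }
    }
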
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 6937308..8c93018 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -35,8 +35,8 @@ import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Queue;
 import java.util.Map.Entry;
+import java.util.Queue;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.UUID;
@@ -71,7 +71,6 @@ import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryProperties;
 import org.apache.hadoop.hive.ql.QueryState;
@@ -197,7 +196,6 @@ import org.apache.hadoop.hive.ql.plan.ptf.OrderExpressionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PTFExpressionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PartitionedTableFunctionDef;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.ResourceType;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
@@ -3155,9 +3153,23 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
     OpParseContext inputCtx = opParseCtx.get(input);
     RowResolver inputRR = inputCtx.getRowResolver();
+
+    ExprNodeDesc filterCond = genExprNodeDesc(condn, inputRR, useCaching, isCBOExecuted());
+    if (filterCond instanceof ExprNodeConstantDesc) {
+      ExprNodeConstantDesc c = (ExprNodeConstantDesc) filterCond;
+      if (Boolean.TRUE.equals(c.getValue())) {
+        // If filter condition is TRUE, we ignore it
+        return input;
+      }
+      if (ExprNodeDescUtils.isNullConstant(c)) {
+        // If filter condition is NULL, transform to FALSE
+        filterCond = new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, false);
+      }
+    }
+
     Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild(
-        new FilterDesc(genExprNodeDesc(condn, inputRR, useCaching, isCBOExecuted()), false),
-        new RowSchema(inputRR.getColumnInfos()), input), inputRR);
+        new FilterDesc(filterCond, false), new RowSchema(
+            inputRR.getColumnInfos()), input), inputRR);
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Created Filter Plan for " + qb.getId() + " row schema: "

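[Editor's note: dropping a constant TRUE filter is a pure no-op removal, and rewriting a constant NULL predicate to FALSE is safe because SQL's three-valued WHERE rejects a row for NULL exactly as it does for FALSE. The .q.out updates later in this patch show the combined effect of this and the other folding changes; annotate_stats_filter.q.out, for instance, now compiles the contradictory predicate ((year = 2001) and year is null) down to "predicate: false".]
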
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
index 2eaed56..239cc61 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
@@ -52,10 +52,12 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessor;
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
 import org.apache.hadoop.hive.ql.lib.Rule;
 import org.apache.hadoop.hive.ql.lib.RuleRegExp;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
 import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.udf.SettableUDF;
@@ -1070,6 +1072,17 @@ public class TypeCheckProcFactory {
           desc = ExprNodeGenericFuncDesc.newInstance(genericUDF, funcText,
               children);
         }
+
+        // If the function is deterministic and its children are constants,
+        // try to fold the expression, e.g. to remove a cast on a constant
+        if (ctx.isFoldExpr() && desc instanceof ExprNodeGenericFuncDesc &&
+                FunctionRegistry.isDeterministic(genericUDF) &&
+                ExprNodeDescUtils.isAllConstants(children)) {
+          ExprNodeDesc constantExpr = ConstantPropagateProcFactory.foldExpr((ExprNodeGenericFuncDesc)desc);
+          if (constantExpr != null) {
+            desc = constantExpr;
+          }
+        }
       }
       // UDFOPPositive is a no-op.
       // However, we still create it, and then remove it here, to make sure we

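[Editor's note: folding at type-check time means literals wrapped in implicit casts reach the optimizer already reduced, provided the UDF is deterministic and every child is constant. The auto_join33.q.out update later in this patch illustrates the kind of simplification these folding hooks enable: the join key (UDFToDouble(_col0) + UDFToDouble(1)) becomes (UDFToDouble(_col0) + 1.0), because UDFToDouble over the literal 1 satisfies both guards and folds away.]
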
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
index c6f8907..2b7b0c3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.UDF;
+import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge;
@@ -210,15 +211,25 @@ public class ExprNodeDescUtils {
    */
   public static ArrayList<ExprNodeDesc> backtrack(List<ExprNodeDesc> sources,
       Operator<?> current, Operator<?> terminal) throws SemanticException {
+    return backtrack(sources, current, terminal, false);
+  }
+
+  public static ArrayList<ExprNodeDesc> backtrack(List<ExprNodeDesc> sources,
+      Operator<?> current, Operator<?> terminal, boolean foldExpr) throws SemanticException {
     ArrayList<ExprNodeDesc> result = new ArrayList<ExprNodeDesc>();
     for (ExprNodeDesc expr : sources) {
-      result.add(backtrack(expr, current, terminal));
+      result.add(backtrack(expr, current, terminal, foldExpr));
     }
     return result;
   }
 
   public static ExprNodeDesc backtrack(ExprNodeDesc source, Operator<?> current,
       Operator<?> terminal) throws SemanticException {
+    return backtrack(source, current, terminal, false);
+  }
+
+  public static ExprNodeDesc backtrack(ExprNodeDesc source, Operator<?> current,
+      Operator<?> terminal, boolean foldExpr) throws SemanticException {
     Operator<?> parent = getSingleParent(current, terminal);
     if (parent == null) {
       return source;
@@ -226,7 +237,7 @@ public class ExprNodeDescUtils {
     if (source instanceof ExprNodeGenericFuncDesc) {
       // all children expression should be resolved
       ExprNodeGenericFuncDesc function = (ExprNodeGenericFuncDesc) source.clone();
-      List<ExprNodeDesc> children = backtrack(function.getChildren(), current, terminal);
+      List<ExprNodeDesc> children = backtrack(function.getChildren(), current, terminal, foldExpr);
       for (ExprNodeDesc child : children) {
         if (child == null) {
           // Could not resolve all of the function children, fail
@@ -234,6 +245,13 @@ public class ExprNodeDescUtils {
         }
       }
       function.setChildren(children);
+      if (foldExpr) {
+        // fold after replacing, if possible
+        ExprNodeDesc foldedFunction = ConstantPropagateProcFactory.foldExpr(function);
+        if (foldedFunction != null) {
+          return foldedFunction;
+        }
+      }
       return function;
     }
     if (source instanceof ExprNodeColumnDesc) {
@@ -243,7 +261,7 @@ public class ExprNodeDescUtils {
     if (source instanceof ExprNodeFieldDesc) {
       // field expression should be resolved
       ExprNodeFieldDesc field = (ExprNodeFieldDesc) source.clone();
-      ExprNodeDesc fieldDesc = backtrack(field.getDesc(), current, terminal);
+      ExprNodeDesc fieldDesc = backtrack(field.getDesc(), current, terminal, foldExpr);
       if (fieldDesc == null) {
         return null;
       }
@@ -485,6 +503,25 @@ public class ExprNodeDescUtils {
 		}
 	}
 
+  public static boolean isConstant(ExprNodeDesc value) {
+    if (value instanceof ExprNodeConstantDesc) {
+      return true;
+    }
+    if (value instanceof ExprNodeGenericFuncDesc) {
+      ExprNodeGenericFuncDesc func = (ExprNodeGenericFuncDesc) value;
+      if (!FunctionRegistry.isDeterministic(func.getGenericUDF())) {
+        return false;
+      }
+      for (ExprNodeDesc child : func.getChildren()) {
+        if (!isConstant(child)) {
+          return false;
+        }
+      }
+      return true;
+    }
+    return false;
+  }
+
   public static boolean isAllConstants(List<ExprNodeDesc> value) {
     for (ExprNodeDesc expr : value) {
       if (!(expr instanceof ExprNodeConstantDesc)) {
@@ -641,4 +678,35 @@ public class ExprNodeDescUtils {
     }
     return (expr instanceof ExprNodeColumnDesc) ? (ExprNodeColumnDesc)expr : null;
   }
+
+  // Find the constant origin of a given column if it originates from a constant.
+  // Otherwise, return the expression that originated the column.
+  public static ExprNodeDesc findConstantExprOrigin(String dpCol, Operator<? extends OperatorDesc> op) {
+    ExprNodeDesc expr = op.getColumnExprMap().get(dpCol);
+    ExprNodeDesc foldedExpr;
+    // If it is a function, we try to fold it
+    if (expr instanceof ExprNodeGenericFuncDesc) {
+      foldedExpr = ConstantPropagateProcFactory.foldExpr((ExprNodeGenericFuncDesc)expr);
+      if (foldedExpr == null) {
+        foldedExpr = expr;
+      }
+    } else {
+      foldedExpr = expr;
+    }
+    // If it is a column reference, we will try to resolve it
+    if (foldedExpr instanceof ExprNodeColumnDesc) {
+      Operator<? extends OperatorDesc> originOp = null;
+      for(Operator<? extends OperatorDesc> parentOp : op.getParentOperators()) {
+        if (parentOp.getColumnExprMap() != null) {
+          originOp = parentOp;
+          break;
+        }
+      }
+      if (originOp != null) {
+        return findConstantExprOrigin(((ExprNodeColumnDesc)foldedExpr).getColumn(), originOp);
+      }
+    }
+    // Otherwise, we return the expression
+    return foldedExpr;
+  }
 }

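[Editor's note: the new isConstant is a recursive structural check: literals qualify, column and field references do not, and a function node qualifies only if its UDF is deterministic and every child qualifies in turn. So, for example, upper(concat('a', 'b')) is constant, upper(col) is not (a column leaf), and rand() + 1 is not (rand is non-deterministic). findConstantExprOrigin applies the same folding while chasing a column back through parent operators' column-expression maps, until it either bottoms out in a constant or runs out of mappings.]
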
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/queries/clientpositive/join_view.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/join_view.q b/ql/src/test/queries/clientpositive/join_view.q
index 16b6816..69c96be 100644
--- a/ql/src/test/queries/clientpositive/join_view.q
+++ b/ql/src/test/queries/clientpositive/join_view.q
@@ -3,8 +3,6 @@ drop table invites2;
 create table invites (foo int, bar string) partitioned by (ds string);
 create table invites2 (foo int, bar string) partitioned by (ds string);
 
-set hive.mapred.mode=strict;
-
 -- test join views: see HIVE-1989
 
 create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds;
@@ -13,4 +11,4 @@ explain select * from v where ds='2011-09-01';
 
 drop view v;
 drop table invites;
-drop table invites2;
\ No newline at end of file
+drop table invites2;

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
index ba0419e..99183fc 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
@@ -718,15 +718,15 @@ STAGE PLANS:
             alias: loc_orc
             Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: ((year = 2001) and year is null) (type: boolean)
+              predicate: false (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
-                expressions: state (type: string), locid (type: int), zip (type: bigint), null (type: int)
+                expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out b/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
index c2b9872..52d17b4 100644
--- a/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
+++ b/ql/src/test/results/clientpositive/archive_excludeHadoop20.q.out
@@ -137,6 +137,7 @@ POSTHOOK: Input: default@tstsrcpart
 POSTHOOK: Input: default@tstsrcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 0	3
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT * FROM tstsrcpart a JOIN tstsrc b ON a.key=b.key
 WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/archive_multi.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/archive_multi.q.out b/ql/src/test/results/clientpositive/archive_multi.q.out
index 0ad29d1..38f3f1a 100644
--- a/ql/src/test/results/clientpositive/archive_multi.q.out
+++ b/ql/src/test/results/clientpositive/archive_multi.q.out
@@ -141,6 +141,7 @@ POSTHOOK: Input: ac_test@tstsrcpart
 POSTHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 0	3
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT * FROM ac_test.tstsrcpart a JOIN ac_test.tstsrc b ON a.key=b.key
 WHERE a.ds='2008-04-08' AND a.hr='12' AND a.key='0'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out b/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
index a9ed049..fefb50c 100644
--- a/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/authorization_explain.q.java1.7.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- JAVA_VERSION_SPECIFIC_OUTPUT
 
 explain authorization select * from src join srcpart
@@ -20,7 +20,7 @@ CURRENT_USER:
   hive_test_user
 OPERATION: 
   QUERY
-Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain formatted authorization select * from src join srcpart
 PREHOOK: type: QUERY
 POSTHOOK: query: explain formatted authorization select * from src join srcpart

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/auto_join33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join33.q.out b/ql/src/test/results/clientpositive/auto_join33.q.out
index b0b3019..5a8bf8c 100644
--- a/ql/src/test/results/clientpositive/auto_join33.q.out
+++ b/ql/src/test/results/clientpositive/auto_join33.q.out
@@ -42,8 +42,8 @@ STAGE PLANS:
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 HashTable Sink Operator
                   keys:
-                    0 (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
-                    1 (UDFToDouble(_col0) + UDFToDouble(2)) (type: double)
+                    0 (UDFToDouble(_col0) + 1.0) (type: double)
+                    1 (UDFToDouble(_col0) + 2.0) (type: double)
 
   Stage: Stage-3
     Map Reduce
@@ -62,8 +62,8 @@ STAGE PLANS:
                   condition map:
                        Inner Join 0 to 1
                   keys:
-                    0 (UDFToDouble(_col0) + UDFToDouble(1)) (type: double)
-                    1 (UDFToDouble(_col0) + UDFToDouble(2)) (type: double)
+                    0 (UDFToDouble(_col0) + 1.0) (type: double)
+                    1 (UDFToDouble(_col0) + 2.0) (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3
                   Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join8.q.out b/ql/src/test/results/clientpositive/auto_join8.q.out
index 324f95d..8daa1c5 100644
--- a/ql/src/test/results/clientpositive/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/auto_join8.q.out
@@ -152,7 +152,7 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@dest1
 POSTHOOK: Lineage: dest1.c1 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: dest1.c2 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: dest1.c3 EXPRESSION [(src)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest1.c3 EXPRESSION []
 POSTHOOK: Lineage: dest1.c4 SIMPLE [(src)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT sum(hash(dest1.c1,dest1.c2,dest1.c3,dest1.c4)) FROM dest1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/auto_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join_filters.q.out b/ql/src/test/results/clientpositive/auto_join_filters.q.out
index 2fdf470..2d4a043 100644
--- a/ql/src/test/results/clientpositive/auto_join_filters.q.out
+++ b/ql/src/test/results/clientpositive/auto_join_filters.q.out
@@ -14,7 +14,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in3.txt' INTO TABLE my
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@myinput1
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[20][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -300,7 +300,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in2.txt' into table sm
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@smb_input2
-Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[20][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/auto_join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join_nulls.q.out b/ql/src/test/results/clientpositive/auto_join_nulls.q.out
index 4af5535..44917c5 100644
--- a/ql/src/test/results/clientpositive/auto_join_nulls.q.out
+++ b/ql/src/test/results/clientpositive/auto_join_nulls.q.out
@@ -14,7 +14,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in1.txt' INTO TABLE my
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@myinput1
-Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
@@ -24,7 +24,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 13630578
-Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[16][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a LEFT OUTER JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
index d8eacbe..62c819e 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out
@@ -138,7 +138,7 @@ POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3out
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@bucket_medium@ds=2008-04-08
-Warning: Map Join MAPJOIN[32][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[31][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
@@ -631,7 +631,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[32][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[31][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@bucket_big

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/bucket_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucket_groupby.q.out b/ql/src/test/results/clientpositive/bucket_groupby.q.out
index ae736f9..1afab38 100644
--- a/ql/src/test/results/clientpositive/bucket_groupby.q.out
+++ b/ql/src/test/results/clientpositive/bucket_groupby.q.out
@@ -438,33 +438,29 @@ STAGE PLANS:
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
-                keys: _col0 (type: string), 3 (type: int)
+                keys: _col0 (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), 3 (type: int)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), 3 (type: int)
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), 3 (type: int)
+          keys: KEY._col0 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -1018,34 +1014,30 @@ STAGE PLANS:
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
-                keys: _col0 (type: string), 3 (type: int)
+                keys: _col0 (type: string)
                 mode: hash
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), 3 (type: int)
-                  sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), 3 (type: int)
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col2 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), 3 (type: int)
+          keys: KEY._col0 (type: string)
           mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2
+          outputColumnNames: _col0, _col1
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col1
+          File Output Operator
+            compressed: false
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

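Note on the bucket_groupby.q.out hunks above: the literal 3 disappears from the group-by keys and the trailing Select Operator is gone — the plan produced after this change keys the aggregation on the string column alone instead of carrying the constant through the shuffle. A query of roughly this shape exercises the hunk (a hypothetical reconstruction from the plan; the table name in the actual .q file may differ):

    -- the constant key 3 contributes nothing to the grouping,
    -- so the optimized plan keys the aggregation on key alone
    EXPLAIN
    SELECT key, count(1)
    FROM clustergroupby
    GROUP BY key, 3;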
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out b/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
index 6b40ee8..557e270 100644
--- a/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
+++ b/ql/src/test/results/clientpositive/bucketizedhiveinputformat.q.out
@@ -22,7 +22,7 @@ POSTHOOK: query: CREATE TABLE T2(name STRING) STORED AS SEQUENCEFILE
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2
-Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[9][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: INSERT OVERWRITE TABLE T2 SELECT * FROM (
 SELECT tmp1.name as name FROM (
   SELECT name, 'MMM' AS n FROM T1) tmp1 

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cast1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cast1.q.out b/ql/src/test/results/clientpositive/cast1.q.out
index 48a0c14..d87c04c 100644
--- a/ql/src/test/results/clientpositive/cast1.q.out
+++ b/ql/src/test/results/clientpositive/cast1.q.out
@@ -110,8 +110,8 @@ POSTHOOK: Lineage: dest1.c2 SIMPLE []
 POSTHOOK: Lineage: dest1.c3 SIMPLE []
 POSTHOOK: Lineage: dest1.c4 SIMPLE []
 POSTHOOK: Lineage: dest1.c5 SIMPLE []
-POSTHOOK: Lineage: dest1.c6 EXPRESSION []
-POSTHOOK: Lineage: dest1.c7 EXPRESSION []
+POSTHOOK: Lineage: dest1.c6 SIMPLE []
+POSTHOOK: Lineage: dest1.c7 SIMPLE []
 PREHOOK: query: select dest1.* FROM dest1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest1

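Note on the cast1.q.out hunk above: the change is lineage-only. Columns c6 and c7 are computed purely from literals, and after the change their lineage is recorded as SIMPLE with an empty base-column list rather than EXPRESSION. The statement in cast1.q is of roughly this form (an abbreviated sketch, not the verbatim test query):

    -- c6/c7 come from literal-only casts, so no source column
    -- feeds them and the lineage degenerates to SIMPLE []
    INSERT OVERWRITE TABLE dest1
    SELECT 3 + 2, 3.0 + 2, 3 + 2.0, 3.0 + 2.0, 3 + CAST(2.0 AS INT),
           CAST(1 AS BOOLEAN), CAST(TRUE AS INT)
    FROM src
    WHERE src.key = 86;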
http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/cbo_const.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_const.q.out b/ql/src/test/results/clientpositive/cbo_const.q.out
index c2a5194..ecf0269 100644
--- a/ql/src/test/results/clientpositive/cbo_const.q.out
+++ b/ql/src/test/results/clientpositive/cbo_const.q.out
@@ -19,21 +19,18 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 2 01:02:03.000000000	2 01:02:03.000000000	2 01:02:03.000000000	2 01:02:03.000000000	true
+Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: drop view t1

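Note on the cbo_const.q.out hunk above: the new warning follows from the same constant handling. Once '2008-04-08' is propagated into both sides of the join, the condition srcpart.ds = s.ds holds by construction, the shuffle join is left without a distribution key, and the 2008-04-09 partitions are pruned from the inputs. After propagation the query behaves like this hypothetical rewrite:

    -- both sides are pinned to ds = '2008-04-08'; the join has
    -- no remaining key, hence the cross-product warning
    SELECT count(*)
    FROM (SELECT * FROM srcpart WHERE ds = '2008-04-08') x
    JOIN (SELECT ds FROM srcpart
          WHERE ds = '2008-04-08' GROUP BY ds) s;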

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query31.q.out b/ql/src/test/results/clientpositive/perf/query31.q.out
index 9d0a904..935631c 100644
--- a/ql/src/test/results/clientpositive/perf/query31.q.out
+++ b/ql/src/test/results/clientpositive/perf/query31.q.out
@@ -33,319 +33,305 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 7
-      File Output Operator [FS_145]
-        Select Operator [SEL_144] (rows=347867560 width=88)
+      File Output Operator [FS_140]
+        Select Operator [SEL_139] (rows=316243230 width=88)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
         <-Reducer 6 [SIMPLE_EDGE]
-          SHUFFLE [RS_143]
-            Select Operator [SEL_142] (rows=347867560 width=88)
-              Output:["_col0","_col2","_col3","_col4","_col5"]
-              Filter Operator [FIL_141] (rows=347867560 width=88)
-                predicate:CASE WHEN ((_col7 > 0)) THEN (CASE WHEN ((_col19 > 0)) THEN (((_col23 / _col19) > (_col11 / _col7))) ELSE ((null > (_col11 / _col7))) END) ELSE (CASE WHEN ((_col19 > 0)) THEN (((_col23 / _col19) > null)) ELSE (null) END) END
-                Merge Join Operator [MERGEJOIN_277] (rows=695735121 width=88)
-                  Conds:RS_138._col12=RS_139._col0(Inner),Output:["_col0","_col3","_col7","_col11","_col15","_col19","_col23"]
+          SHUFFLE [RS_138]
+            Select Operator [SEL_137] (rows=316243230 width=88)
+              Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
+              Filter Operator [FIL_136] (rows=316243230 width=88)
+                predicate:CASE WHEN ((_col3 > 0)) THEN (CASE WHEN ((_col9 > 0)) THEN (((_col11 / _col9) > (_col5 / _col3))) ELSE ((null > (_col5 / _col3))) END) ELSE (CASE WHEN ((_col9 > 0)) THEN (((_col11 / _col9) > null)) ELSE (null) END) END
+                Merge Join Operator [MERGEJOIN_272] (rows=632486460 width=88)
+                  Conds:RS_132._col6=RS_133._col0(Inner),Output:["_col0","_col1","_col3","_col5","_col7","_col9","_col11"]
                 <-Reducer 38 [SIMPLE_EDGE]
-                  SHUFFLE [RS_139]
+                  SHUFFLE [RS_133]
                     PartitionCols:_col0
-                    Select Operator [SEL_137] (rows=87121617 width=135)
-                      Output:["_col0","_col3"]
-                      Group By Operator [GBY_136] (rows=87121617 width=135)
-                        Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, 3, 1998
-                      <-Reducer 37 [SIMPLE_EDGE]
-                        SHUFFLE [RS_135]
-                          PartitionCols:_col0, 3, 1998
-                          Group By Operator [GBY_134] (rows=174243235 width=135)
-                            Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col3)"],keys:_col0, 3, 1998
-                            Select Operator [SEL_132] (rows=174243235 width=135)
-                              Output:["_col0","_col3"]
-                              Merge Join Operator [MERGEJOIN_274] (rows=174243235 width=135)
-                                Conds:RS_129._col1=RS_130._col0(Inner),Output:["_col2","_col7"]
-                              <-Map 40 [SIMPLE_EDGE]
-                                SHUFFLE [RS_130]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_125] (rows=40000000 width=1014)
-                                    Output:["_col0","_col1"]
-                                    Filter Operator [FIL_262] (rows=40000000 width=1014)
-                                      predicate:(ca_address_sk is not null and ca_county is not null)
-                                      TableScan [TS_123] (rows=40000000 width=1014)
-                                        default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
-                              <-Reducer 36 [SIMPLE_EDGE]
-                                SHUFFLE [RS_129]
-                                  PartitionCols:_col1
-                                  Merge Join Operator [MERGEJOIN_273] (rows=158402938 width=135)
-                                    Conds:RS_126._col0=RS_127._col0(Inner),Output:["_col1","_col2"]
-                                  <-Map 35 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_126]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_119] (rows=144002668 width=135)
-                                        Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_260] (rows=144002668 width=135)
-                                          predicate:(ws_sold_date_sk is not null and ws_bill_addr_sk is not null)
-                                          TableScan [TS_117] (rows=144002668 width=135)
-                                            default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_addr_sk","ws_ext_sales_price"]
-                                  <-Map 39 [SIMPLE_EDGE]
-                                    SHUFFLE [RS_127]
-                                      PartitionCols:_col0
-                                      Select Operator [SEL_122] (rows=18262 width=1119)
-                                        Output:["_col0"]
-                                        Filter Operator [FIL_261] (rows=18262 width=1119)
-                                          predicate:((d_qoy = 3) and (d_year = 1998) and d_date_sk is not null)
-                                          TableScan [TS_120] (rows=73049 width=1119)
-                                            default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                    Group By Operator [GBY_130] (rows=87121617 width=135)
+                      Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                    <-Reducer 37 [SIMPLE_EDGE]
+                      SHUFFLE [RS_129]
+                        PartitionCols:_col0
+                        Group By Operator [GBY_128] (rows=174243235 width=135)
+                          Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
+                          Select Operator [SEL_127] (rows=174243235 width=135)
+                            Output:["_col7","_col2"]
+                            Merge Join Operator [MERGEJOIN_269] (rows=174243235 width=135)
+                              Conds:RS_124._col1=RS_125._col0(Inner),Output:["_col2","_col7"]
+                            <-Map 40 [SIMPLE_EDGE]
+                              SHUFFLE [RS_125]
+                                PartitionCols:_col0
+                                Select Operator [SEL_120] (rows=40000000 width=1014)
+                                  Output:["_col0","_col1"]
+                                  Filter Operator [FIL_257] (rows=40000000 width=1014)
+                                    predicate:(ca_address_sk is not null and ca_county is not null)
+                                    TableScan [TS_118] (rows=40000000 width=1014)
+                                      default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
+                            <-Reducer 36 [SIMPLE_EDGE]
+                              SHUFFLE [RS_124]
+                                PartitionCols:_col1
+                                Merge Join Operator [MERGEJOIN_268] (rows=158402938 width=135)
+                                  Conds:RS_121._col0=RS_122._col0(Inner),Output:["_col1","_col2"]
+                                <-Map 35 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_121]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_114] (rows=144002668 width=135)
+                                      Output:["_col0","_col1","_col2"]
+                                      Filter Operator [FIL_255] (rows=144002668 width=135)
+                                        predicate:(ws_sold_date_sk is not null and ws_bill_addr_sk is not null)
+                                        TableScan [TS_112] (rows=144002668 width=135)
+                                          default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_addr_sk","ws_ext_sales_price"]
+                                <-Map 39 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_122]
+                                    PartitionCols:_col0
+                                    Select Operator [SEL_117] (rows=18262 width=1119)
+                                      Output:["_col0"]
+                                      Filter Operator [FIL_256] (rows=18262 width=1119)
+                                        predicate:((d_qoy = 3) and (d_year = 1998) and d_date_sk is not null)
+                                        TableScan [TS_115] (rows=73049 width=1119)
+                                          default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                 <-Reducer 5 [SIMPLE_EDGE]
-                  SHUFFLE [RS_138]
-                    PartitionCols:_col12
-                    Select Operator [SEL_116] (rows=632486460 width=88)
-                      Output:["_col0","_col11","_col12","_col15","_col19","_col3","_col7"]
-                      Filter Operator [FIL_115] (rows=632486460 width=88)
-                        predicate:CASE WHEN ((_col3 > 0)) THEN (CASE WHEN ((_col15 > 0)) THEN (((_col19 / _col15) > (_col7 / _col3))) ELSE ((null > (_col7 / _col3))) END) ELSE (CASE WHEN ((_col15 > 0)) THEN (((_col19 / _col15) > null)) ELSE (null) END) END
-                        Select Operator [SEL_114] (rows=1264972920 width=88)
-                          Output:["_col0","_col3","_col7","_col11","_col12","_col15","_col19"]
-                          Merge Join Operator [MERGEJOIN_276] (rows=1264972920 width=88)
-                            Conds:RS_109._col0=RS_110._col0(Inner),RS_109._col0=RS_111._col0(Inner),RS_111._col0=RS_112._col4(Inner),Output:["_col0","_col3","_col7","_col8","_col11","_col15","_col19"]
-                          <-Reducer 13 [SIMPLE_EDGE]
-                            SHUFFLE [RS_110]
-                              PartitionCols:_col0
-                              Select Operator [SEL_41] (rows=87121617 width=135)
-                                Output:["_col0","_col3"]
-                                Group By Operator [GBY_40] (rows=87121617 width=135)
-                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, 2, 1998
-                                <-Reducer 12 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_39]
-                                    PartitionCols:_col0, 2, 1998
-                                    Group By Operator [GBY_38] (rows=174243235 width=135)
-                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col3)"],keys:_col0, 2, 1998
-                                      Select Operator [SEL_36] (rows=174243235 width=135)
-                                        Output:["_col0","_col3"]
-                                        Merge Join Operator [MERGEJOIN_266] (rows=174243235 width=135)
-                                          Conds:RS_33._col1=RS_34._col0(Inner),Output:["_col2","_col7"]
-                                        <-Map 15 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_34]
+                  SHUFFLE [RS_132]
+                    PartitionCols:_col6
+                    Filter Operator [FIL_110] (rows=574987679 width=88)
+                      predicate:CASE WHEN ((_col1 > 0)) THEN (CASE WHEN ((_col7 > 0)) THEN (((_col9 / _col7) > (_col3 / _col1))) ELSE ((null > (_col3 / _col1))) END) ELSE (CASE WHEN ((_col7 > 0)) THEN (((_col9 / _col7) > null)) ELSE (null) END) END
+                      Select Operator [SEL_109] (rows=1149975359 width=88)
+                        Output:["_col0","_col1","_col3","_col5","_col6","_col7","_col9"]
+                        Merge Join Operator [MERGEJOIN_271] (rows=1149975359 width=88)
+                          Conds:RS_104._col0=RS_105._col0(Inner),RS_104._col0=RS_106._col0(Inner),RS_104._col0=RS_107._col0(Inner),Output:["_col0","_col1","_col3","_col5","_col6","_col7","_col9"]
+                        <-Reducer 13 [SIMPLE_EDGE]
+                          SHUFFLE [RS_105]
+                            PartitionCols:_col0
+                            Group By Operator [GBY_38] (rows=348477374 width=88)
+                              Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                            <-Reducer 12 [SIMPLE_EDGE]
+                              SHUFFLE [RS_37]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_36] (rows=696954748 width=88)
+                                  Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
+                                  Select Operator [SEL_35] (rows=696954748 width=88)
+                                    Output:["_col7","_col2"]
+                                    Merge Join Operator [MERGEJOIN_261] (rows=696954748 width=88)
+                                      Conds:RS_32._col1=RS_33._col0(Inner),Output:["_col2","_col7"]
+                                    <-Map 15 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_33]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_28] (rows=40000000 width=1014)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_245] (rows=40000000 width=1014)
+                                            predicate:(ca_address_sk is not null and ca_county is not null)
+                                            TableScan [TS_26] (rows=40000000 width=1014)
+                                              default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
+                                    <-Reducer 11 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_32]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_260] (rows=633595212 width=88)
+                                          Conds:RS_29._col0=RS_30._col0(Inner),Output:["_col1","_col2"]
+                                        <-Map 10 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_29]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_29] (rows=40000000 width=1014)
+                                            Select Operator [SEL_22] (rows=575995635 width=88)
+                                              Output:["_col0","_col1","_col2"]
+                                              Filter Operator [FIL_243] (rows=575995635 width=88)
+                                                predicate:(ss_sold_date_sk is not null and ss_addr_sk is not null)
+                                                TableScan [TS_20] (rows=575995635 width=88)
+                                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
+                                        <-Map 14 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_30]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_25] (rows=18262 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_244] (rows=18262 width=1119)
+                                                predicate:((d_qoy = 1) and (d_year = 1998) and d_date_sk is not null)
+                                                TableScan [TS_23] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                        <-Reducer 19 [SIMPLE_EDGE]
+                          SHUFFLE [RS_106]
+                            PartitionCols:_col0
+                            Group By Operator [GBY_58] (rows=348477374 width=88)
+                              Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                            <-Reducer 18 [SIMPLE_EDGE]
+                              SHUFFLE [RS_57]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_56] (rows=696954748 width=88)
+                                  Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
+                                  Select Operator [SEL_55] (rows=696954748 width=88)
+                                    Output:["_col7","_col2"]
+                                    Merge Join Operator [MERGEJOIN_263] (rows=696954748 width=88)
+                                      Conds:RS_52._col1=RS_53._col0(Inner),Output:["_col2","_col7"]
+                                    <-Map 21 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_53]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_48] (rows=40000000 width=1014)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_248] (rows=40000000 width=1014)
+                                            predicate:(ca_address_sk is not null and ca_county is not null)
+                                            TableScan [TS_46] (rows=40000000 width=1014)
+                                              default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
+                                    <-Reducer 17 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_52]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_262] (rows=633595212 width=88)
+                                          Conds:RS_49._col0=RS_50._col0(Inner),Output:["_col1","_col2"]
+                                        <-Map 16 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_49]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_42] (rows=575995635 width=88)
+                                              Output:["_col0","_col1","_col2"]
+                                              Filter Operator [FIL_246] (rows=575995635 width=88)
+                                                predicate:(ss_sold_date_sk is not null and ss_addr_sk is not null)
+                                                TableScan [TS_40] (rows=575995635 width=88)
+                                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
+                                        <-Map 20 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_50]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_45] (rows=18262 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_247] (rows=18262 width=1119)
+                                                predicate:((d_qoy = 3) and (d_year = 1998) and d_date_sk is not null)
+                                                TableScan [TS_43] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                        <-Reducer 26 [SIMPLE_EDGE]
+                          SHUFFLE [RS_107]
+                            PartitionCols:_col0
+                            Merge Join Operator [MERGEJOIN_270] (rows=95833780 width=135)
+                              Conds:RS_100._col0=RS_101._col0(Inner),Output:["_col0","_col1","_col3"]
+                            <-Reducer 25 [SIMPLE_EDGE]
+                              SHUFFLE [RS_100]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_78] (rows=87121617 width=135)
+                                  Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                                <-Reducer 24 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_77]
+                                    PartitionCols:_col0
+                                    Group By Operator [GBY_76] (rows=174243235 width=135)
+                                      Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
+                                      Select Operator [SEL_75] (rows=174243235 width=135)
+                                        Output:["_col7","_col2"]
+                                        Merge Join Operator [MERGEJOIN_265] (rows=174243235 width=135)
+                                          Conds:RS_72._col1=RS_73._col0(Inner),Output:["_col2","_col7"]
+                                        <-Map 28 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_73]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_68] (rows=40000000 width=1014)
                                               Output:["_col0","_col1"]
-                                              Filter Operator [FIL_250] (rows=40000000 width=1014)
+                                              Filter Operator [FIL_251] (rows=40000000 width=1014)
                                                 predicate:(ca_address_sk is not null and ca_county is not null)
-                                                TableScan [TS_27] (rows=40000000 width=1014)
+                                                TableScan [TS_66] (rows=40000000 width=1014)
                                                   default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
-                                        <-Reducer 11 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_33]
+                                        <-Reducer 23 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_72]
                                             PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_265] (rows=158402938 width=135)
-                                              Conds:RS_30._col0=RS_31._col0(Inner),Output:["_col1","_col2"]
-                                            <-Map 10 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_30]
+                                            Merge Join Operator [MERGEJOIN_264] (rows=158402938 width=135)
+                                              Conds:RS_69._col0=RS_70._col0(Inner),Output:["_col1","_col2"]
+                                            <-Map 22 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_69]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_23] (rows=144002668 width=135)
+                                                Select Operator [SEL_62] (rows=144002668 width=135)
                                                   Output:["_col0","_col1","_col2"]
-                                                  Filter Operator [FIL_248] (rows=144002668 width=135)
+                                                  Filter Operator [FIL_249] (rows=144002668 width=135)
                                                     predicate:(ws_sold_date_sk is not null and ws_bill_addr_sk is not null)
-                                                    TableScan [TS_21] (rows=144002668 width=135)
+                                                    TableScan [TS_60] (rows=144002668 width=135)
                                                       default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_addr_sk","ws_ext_sales_price"]
-                                            <-Map 14 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_31]
+                                            <-Map 27 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_70]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_26] (rows=18262 width=1119)
+                                                Select Operator [SEL_65] (rows=18262 width=1119)
                                                   Output:["_col0"]
-                                                  Filter Operator [FIL_249] (rows=18262 width=1119)
-                                                    predicate:((d_qoy = 2) and (d_year = 1998) and d_date_sk is not null)
-                                                    TableScan [TS_24] (rows=73049 width=1119)
-                                                      default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                          <-Reducer 19 [SIMPLE_EDGE]
-                            SHUFFLE [RS_111]
-                              PartitionCols:_col0
-                              Select Operator [SEL_62] (rows=348477374 width=88)
-                                Output:["_col0","_col3"]
-                                Group By Operator [GBY_61] (rows=348477374 width=88)
-                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, 1, 1998
-                                <-Reducer 18 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_60]
-                                    PartitionCols:_col0, 1, 1998
-                                    Group By Operator [GBY_59] (rows=696954748 width=88)
-                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col3)"],keys:_col0, 1, 1998
-                                      Select Operator [SEL_57] (rows=696954748 width=88)
-                                        Output:["_col0","_col3"]
-                                        Merge Join Operator [MERGEJOIN_268] (rows=696954748 width=88)
-                                          Conds:RS_54._col1=RS_55._col0(Inner),Output:["_col2","_col7"]
-                                        <-Map 21 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_55]
-                                            PartitionCols:_col0
-                                            Select Operator [SEL_50] (rows=40000000 width=1014)
-                                              Output:["_col0","_col1"]
-                                              Filter Operator [FIL_253] (rows=40000000 width=1014)
-                                                predicate:(ca_address_sk is not null and ca_county is not null)
-                                                TableScan [TS_48] (rows=40000000 width=1014)
-                                                  default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
-                                        <-Reducer 17 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_54]
-                                            PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_267] (rows=633595212 width=88)
-                                              Conds:RS_51._col0=RS_52._col0(Inner),Output:["_col1","_col2"]
-                                            <-Map 16 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_51]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_44] (rows=575995635 width=88)
-                                                  Output:["_col0","_col1","_col2"]
-                                                  Filter Operator [FIL_251] (rows=575995635 width=88)
-                                                    predicate:(ss_sold_date_sk is not null and ss_addr_sk is not null)
-                                                    TableScan [TS_42] (rows=575995635 width=88)
-                                                      default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
-                                            <-Map 20 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_52]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_47] (rows=18262 width=1119)
-                                                  Output:["_col0"]
-                                                  Filter Operator [FIL_252] (rows=18262 width=1119)
+                                                  Filter Operator [FIL_250] (rows=18262 width=1119)
                                                     predicate:((d_qoy = 1) and (d_year = 1998) and d_date_sk is not null)
-                                                    TableScan [TS_45] (rows=73049 width=1119)
+                                                    TableScan [TS_63] (rows=73049 width=1119)
                                                       default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                          <-Reducer 26 [SIMPLE_EDGE]
-                            SHUFFLE [RS_112]
-                              PartitionCols:_col4
-                              Merge Join Operator [MERGEJOIN_275] (rows=383325119 width=88)
-                                Conds:RS_105._col0=RS_106._col0(Inner),Output:["_col3","_col4","_col7"]
-                              <-Reducer 25 [SIMPLE_EDGE]
-                                SHUFFLE [RS_105]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_83] (rows=348477374 width=88)
-                                    Output:["_col0","_col3"]
-                                    Group By Operator [GBY_82] (rows=348477374 width=88)
-                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, 3, 1998
-                                    <-Reducer 24 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_81]
-                                        PartitionCols:_col0, 3, 1998
-                                        Group By Operator [GBY_80] (rows=696954748 width=88)
-                                          Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col3)"],keys:_col0, 3, 1998
-                                          Select Operator [SEL_78] (rows=696954748 width=88)
-                                            Output:["_col0","_col3"]
-                                            Merge Join Operator [MERGEJOIN_270] (rows=696954748 width=88)
-                                              Conds:RS_75._col1=RS_76._col0(Inner),Output:["_col2","_col7"]
-                                            <-Map 28 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_76]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_71] (rows=40000000 width=1014)
-                                                  Output:["_col0","_col1"]
-                                                  Filter Operator [FIL_256] (rows=40000000 width=1014)
-                                                    predicate:(ca_address_sk is not null and ca_county is not null)
-                                                    TableScan [TS_69] (rows=40000000 width=1014)
-                                                      default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
-                                            <-Reducer 23 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_75]
-                                                PartitionCols:_col1
-                                                Merge Join Operator [MERGEJOIN_269] (rows=633595212 width=88)
-                                                  Conds:RS_72._col0=RS_73._col0(Inner),Output:["_col1","_col2"]
-                                                <-Map 22 [SIMPLE_EDGE]
-                                                  SHUFFLE [RS_72]
-                                                    PartitionCols:_col0
-                                                    Select Operator [SEL_65] (rows=575995635 width=88)
-                                                      Output:["_col0","_col1","_col2"]
-                                                      Filter Operator [FIL_254] (rows=575995635 width=88)
-                                                        predicate:(ss_sold_date_sk is not null and ss_addr_sk is not null)
-                                                        TableScan [TS_63] (rows=575995635 width=88)
-                                                          default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
-                                                <-Map 27 [SIMPLE_EDGE]
-                                                  SHUFFLE [RS_73]
-                                                    PartitionCols:_col0
-                                                    Select Operator [SEL_68] (rows=18262 width=1119)
-                                                      Output:["_col0"]
-                                                      Filter Operator [FIL_255] (rows=18262 width=1119)
-                                                        predicate:((d_qoy = 3) and (d_year = 1998) and d_date_sk is not null)
-                                                        TableScan [TS_66] (rows=73049 width=1119)
-                                                          default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                              <-Reducer 32 [SIMPLE_EDGE]
-                                SHUFFLE [RS_106]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_104] (rows=348477374 width=88)
-                                    Output:["_col0","_col3"]
-                                    Group By Operator [GBY_103] (rows=348477374 width=88)
-                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, 2, 1998
-                                    <-Reducer 31 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_102]
-                                        PartitionCols:_col0, 2, 1998
-                                        Group By Operator [GBY_101] (rows=696954748 width=88)
-                                          Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col3)"],keys:_col0, 2, 1998
-                                          Select Operator [SEL_99] (rows=696954748 width=88)
-                                            Output:["_col0","_col3"]
-                                            Merge Join Operator [MERGEJOIN_272] (rows=696954748 width=88)
-                                              Conds:RS_96._col1=RS_97._col0(Inner),Output:["_col2","_col7"]
-                                            <-Map 34 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_97]
-                                                PartitionCols:_col0
-                                                Select Operator [SEL_92] (rows=40000000 width=1014)
-                                                  Output:["_col0","_col1"]
-                                                  Filter Operator [FIL_259] (rows=40000000 width=1014)
-                                                    predicate:(ca_address_sk is not null and ca_county is not null)
-                                                    TableScan [TS_90] (rows=40000000 width=1014)
-                                                      default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
-                                            <-Reducer 30 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_96]
-                                                PartitionCols:_col1
-                                                Merge Join Operator [MERGEJOIN_271] (rows=633595212 width=88)
-                                                  Conds:RS_93._col0=RS_94._col0(Inner),Output:["_col1","_col2"]
-                                                <-Map 29 [SIMPLE_EDGE]
-                                                  SHUFFLE [RS_93]
-                                                    PartitionCols:_col0
-                                                    Select Operator [SEL_86] (rows=575995635 width=88)
-                                                      Output:["_col0","_col1","_col2"]
-                                                      Filter Operator [FIL_257] (rows=575995635 width=88)
-                                                        predicate:(ss_sold_date_sk is not null and ss_addr_sk is not null)
-                                                        TableScan [TS_84] (rows=575995635 width=88)
-                                                          default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
-                                                <-Map 33 [SIMPLE_EDGE]
-                                                  SHUFFLE [RS_94]
-                                                    PartitionCols:_col0
-                                                    Select Operator [SEL_89] (rows=18262 width=1119)
-                                                      Output:["_col0"]
-                                                      Filter Operator [FIL_258] (rows=18262 width=1119)
-                                                        predicate:((d_qoy = 2) and (d_year = 1998) and d_date_sk is not null)
-                                                        TableScan [TS_87] (rows=73049 width=1119)
-                                                          default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
-                          <-Reducer 4 [SIMPLE_EDGE]
-                            SHUFFLE [RS_109]
-                              PartitionCols:_col0
-                              Select Operator [SEL_20] (rows=87121617 width=135)
-                                Output:["_col0","_col3"]
-                                Group By Operator [GBY_19] (rows=87121617 width=135)
-                                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, 1, 1998
-                                <-Reducer 3 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_18]
-                                    PartitionCols:_col0, 1, 1998
-                                    Group By Operator [GBY_17] (rows=174243235 width=135)
-                                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col3)"],keys:_col0, 1, 1998
-                                      Select Operator [SEL_15] (rows=174243235 width=135)
-                                        Output:["_col0","_col3"]
-                                        Merge Join Operator [MERGEJOIN_264] (rows=174243235 width=135)
-                                          Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col2","_col7"]
-                                        <-Map 9 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_13]
+                            <-Reducer 32 [SIMPLE_EDGE]
+                              SHUFFLE [RS_101]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_98] (rows=87121617 width=135)
+                                  Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                                <-Reducer 31 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_97]
+                                    PartitionCols:_col0
+                                    Group By Operator [GBY_96] (rows=174243235 width=135)
+                                      Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
+                                      Select Operator [SEL_95] (rows=174243235 width=135)
+                                        Output:["_col7","_col2"]
+                                        Merge Join Operator [MERGEJOIN_267] (rows=174243235 width=135)
+                                          Conds:RS_92._col1=RS_93._col0(Inner),Output:["_col2","_col7"]
+                                        <-Map 34 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_93]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_8] (rows=40000000 width=1014)
+                                            Select Operator [SEL_88] (rows=40000000 width=1014)
                                               Output:["_col0","_col1"]
-                                              Filter Operator [FIL_247] (rows=40000000 width=1014)
+                                              Filter Operator [FIL_254] (rows=40000000 width=1014)
                                                 predicate:(ca_address_sk is not null and ca_county is not null)
-                                                TableScan [TS_6] (rows=40000000 width=1014)
+                                                TableScan [TS_86] (rows=40000000 width=1014)
                                                   default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
-                                        <-Reducer 2 [SIMPLE_EDGE]
-                                          SHUFFLE [RS_12]
+                                        <-Reducer 30 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_92]
                                             PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_263] (rows=158402938 width=135)
-                                              Conds:RS_9._col0=RS_10._col0(Inner),Output:["_col1","_col2"]
-                                            <-Map 1 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_9]
+                                            Merge Join Operator [MERGEJOIN_266] (rows=158402938 width=135)
+                                              Conds:RS_89._col0=RS_90._col0(Inner),Output:["_col1","_col2"]
+                                            <-Map 29 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_89]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_2] (rows=144002668 width=135)
+                                                Select Operator [SEL_82] (rows=144002668 width=135)
                                                   Output:["_col0","_col1","_col2"]
-                                                  Filter Operator [FIL_245] (rows=144002668 width=135)
+                                                  Filter Operator [FIL_252] (rows=144002668 width=135)
                                                     predicate:(ws_sold_date_sk is not null and ws_bill_addr_sk is not null)
-                                                    TableScan [TS_0] (rows=144002668 width=135)
+                                                    TableScan [TS_80] (rows=144002668 width=135)
                                                       default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_addr_sk","ws_ext_sales_price"]
-                                            <-Map 8 [SIMPLE_EDGE]
-                                              SHUFFLE [RS_10]
+                                            <-Map 33 [SIMPLE_EDGE]
+                                              SHUFFLE [RS_90]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_5] (rows=18262 width=1119)
+                                                Select Operator [SEL_85] (rows=18262 width=1119)
                                                   Output:["_col0"]
-                                                  Filter Operator [FIL_246] (rows=18262 width=1119)
-                                                    predicate:((d_qoy = 1) and (d_year = 1998) and d_date_sk is not null)
-                                                    TableScan [TS_3] (rows=73049 width=1119)
+                                                  Filter Operator [FIL_253] (rows=18262 width=1119)
+                                                    predicate:((d_qoy = 2) and (d_year = 1998) and d_date_sk is not null)
+                                                    TableScan [TS_83] (rows=73049 width=1119)
                                                       default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
+                        <-Reducer 4 [SIMPLE_EDGE]
+                          SHUFFLE [RS_104]
+                            PartitionCols:_col0
+                            Group By Operator [GBY_18] (rows=348477374 width=88)
+                              Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
+                            <-Reducer 3 [SIMPLE_EDGE]
+                              SHUFFLE [RS_17]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_16] (rows=696954748 width=88)
+                                  Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
+                                  Select Operator [SEL_15] (rows=696954748 width=88)
+                                    Output:["_col7","_col2"]
+                                    Merge Join Operator [MERGEJOIN_259] (rows=696954748 width=88)
+                                      Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col2","_col7"]
+                                    <-Map 9 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_13]
+                                        PartitionCols:_col0
+                                        Select Operator [SEL_8] (rows=40000000 width=1014)
+                                          Output:["_col0","_col1"]
+                                          Filter Operator [FIL_242] (rows=40000000 width=1014)
+                                            predicate:(ca_address_sk is not null and ca_county is not null)
+                                            TableScan [TS_6] (rows=40000000 width=1014)
+                                              default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
+                                    <-Reducer 2 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_12]
+                                        PartitionCols:_col1
+                                        Merge Join Operator [MERGEJOIN_258] (rows=633595212 width=88)
+                                          Conds:RS_9._col0=RS_10._col0(Inner),Output:["_col1","_col2"]
+                                        <-Map 1 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_9]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_2] (rows=575995635 width=88)
+                                              Output:["_col0","_col1","_col2"]
+                                              Filter Operator [FIL_240] (rows=575995635 width=88)
+                                                predicate:(ss_sold_date_sk is not null and ss_addr_sk is not null)
+                                                TableScan [TS_0] (rows=575995635 width=88)
+                                                  default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
+                                        <-Map 8 [SIMPLE_EDGE]
+                                          SHUFFLE [RS_10]
+                                            PartitionCols:_col0
+                                            Select Operator [SEL_5] (rows=18262 width=1119)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_241] (rows=18262 width=1119)
+                                                predicate:((d_qoy = 2) and (d_year = 1998) and d_date_sk is not null)
+                                                TableScan [TS_3] (rows=73049 width=1119)
+                                                  default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query39.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query39.q.out b/ql/src/test/results/clientpositive/perf/query39.q.out
index 04129c1..5c90a53 100644
--- a/ql/src/test/results/clientpositive/perf/query39.q.out
+++ b/ql/src/test/results/clientpositive/perf/query39.q.out
@@ -28,27 +28,27 @@ Stage-0
           SHUFFLE [RS_60]
             Select Operator [SEL_59] (rows=13756683 width=15)
               Output:["_col0","_col1","_col3","_col4","_col5","_col6","_col8","_col9"]
-              Merge Join Operator [MERGEJOIN_105] (rows=13756683 width=15)
-                Conds:RS_56._col2, _col1=RS_57._col2, _col1(Inner),Output:["_col1","_col2","_col4","_col5","_col7","_col8","_col10","_col11"]
+              Merge Join Operator [MERGEJOIN_103] (rows=13756683 width=15)
+                Conds:RS_56._col2, _col1=RS_57._col2, _col1(Inner),Output:["_col1","_col2","_col3","_col4","_col6","_col7","_col8","_col9"]
               <-Reducer 15 [SIMPLE_EDGE]
                 SHUFFLE [RS_57]
                   PartitionCols:_col2, _col1
                   Select Operator [SEL_55] (rows=12506076 width=15)
-                    Output:["_col1","_col2","_col4","_col5"]
+                    Output:["_col1","_col2","_col3","_col4"]
                     Filter Operator [FIL_54] (rows=12506076 width=15)
-                      predicate:CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col5 / _col4) > 1.0)) END
-                      Select Operator [SEL_97] (rows=25012152 width=15)
-                        Output:["_col1","_col2","_col4","_col5"]
-                        Group By Operator [GBY_53] (rows=25012152 width=15)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["avg(VALUE._col0)","stddev_samp(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, 4
+                      predicate:CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END
+                      Select Operator [SEL_53] (rows=25012152 width=15)
+                        Output:["_col1","_col2","_col3","_col4"]
+                        Group By Operator [GBY_52] (rows=25012152 width=15)
+                          Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["stddev_samp(VALUE._col0)","avg(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2
                         <-Reducer 14 [SIMPLE_EDGE]
-                          SHUFFLE [RS_52]
-                            PartitionCols:_col0, _col1, _col2, 4
-                            Group By Operator [GBY_51] (rows=50024305 width=15)
-                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["avg(_col4)","stddev_samp(_col4)"],keys:_col0, _col1, _col2, 4
+                          SHUFFLE [RS_51]
+                            PartitionCols:_col0, _col1, _col2
+                            Group By Operator [GBY_50] (rows=50024305 width=15)
+                              Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["stddev_samp(_col3)","avg(_col3)"],keys:_col7, _col8, _col9
                               Select Operator [SEL_49] (rows=50024305 width=15)
-                                Output:["_col0","_col1","_col2","_col4"]
-                                Merge Join Operator [MERGEJOIN_104] (rows=50024305 width=15)
+                                Output:["_col7","_col8","_col9","_col3"]
+                                Merge Join Operator [MERGEJOIN_102] (rows=50024305 width=15)
                                   Conds:RS_46._col2=RS_47._col0(Inner),Output:["_col3","_col7","_col8","_col9"]
                                 <-Map 18 [SIMPLE_EDGE]
                                   SHUFFLE [RS_47]
@@ -62,7 +62,7 @@ Stage-0
                                 <-Reducer 13 [SIMPLE_EDGE]
                                   SHUFFLE [RS_46]
                                     PartitionCols:_col2
-                                    Merge Join Operator [MERGEJOIN_103] (rows=45476640 width=15)
+                                    Merge Join Operator [MERGEJOIN_101] (rows=45476640 width=15)
                                       Conds:RS_43._col1=RS_44._col0(Inner),Output:["_col2","_col3","_col7"]
                                     <-Map 17 [SIMPLE_EDGE]
                                       SHUFFLE [RS_44]
@@ -76,7 +76,7 @@ Stage-0
                                     <-Reducer 12 [SIMPLE_EDGE]
                                       SHUFFLE [RS_43]
                                         PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_102] (rows=41342400 width=15)
+                                        Merge Join Operator [MERGEJOIN_100] (rows=41342400 width=15)
                                           Conds:RS_40._col0=RS_41._col0(Inner),Output:["_col1","_col2","_col3"]
                                         <-Map 11 [SIMPLE_EDGE]
                                           SHUFFLE [RS_40]
@@ -100,21 +100,21 @@ Stage-0
                 SHUFFLE [RS_56]
                   PartitionCols:_col2, _col1
                   Select Operator [SEL_27] (rows=12506076 width=15)
-                    Output:["_col1","_col2","_col4","_col5"]
+                    Output:["_col1","_col2","_col3","_col4"]
                     Filter Operator [FIL_26] (rows=12506076 width=15)
-                      predicate:CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col5 / _col4) > 1.0)) END
-                      Select Operator [SEL_98] (rows=25012152 width=15)
-                        Output:["_col1","_col2","_col4","_col5"]
-                        Group By Operator [GBY_25] (rows=25012152 width=15)
-                          Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["avg(VALUE._col0)","stddev_samp(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2, 3
+                      predicate:CASE WHEN ((_col4 = 0.0)) THEN (false) ELSE (((_col3 / _col4) > 1.0)) END
+                      Select Operator [SEL_25] (rows=25012152 width=15)
+                        Output:["_col1","_col2","_col3","_col4"]
+                        Group By Operator [GBY_24] (rows=25012152 width=15)
+                          Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["stddev_samp(VALUE._col0)","avg(VALUE._col1)"],keys:KEY._col0, KEY._col1, KEY._col2
                         <-Reducer 4 [SIMPLE_EDGE]
-                          SHUFFLE [RS_24]
-                            PartitionCols:_col0, _col1, _col2, 3
-                            Group By Operator [GBY_23] (rows=50024305 width=15)
-                              Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["avg(_col4)","stddev_samp(_col4)"],keys:_col0, _col1, _col2, 3
+                          SHUFFLE [RS_23]
+                            PartitionCols:_col0, _col1, _col2
+                            Group By Operator [GBY_22] (rows=50024305 width=15)
+                              Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["stddev_samp(_col3)","avg(_col3)"],keys:_col7, _col8, _col9
                               Select Operator [SEL_21] (rows=50024305 width=15)
-                                Output:["_col0","_col1","_col2","_col4"]
-                                Merge Join Operator [MERGEJOIN_101] (rows=50024305 width=15)
+                                Output:["_col7","_col8","_col9","_col3"]
+                                Merge Join Operator [MERGEJOIN_99] (rows=50024305 width=15)
                                   Conds:RS_18._col2=RS_19._col0(Inner),Output:["_col3","_col7","_col8","_col9"]
                                 <-Map 10 [SIMPLE_EDGE]
                                   SHUFFLE [RS_19]
@@ -128,7 +128,7 @@ Stage-0
                                 <-Reducer 3 [SIMPLE_EDGE]
                                   SHUFFLE [RS_18]
                                     PartitionCols:_col2
-                                    Merge Join Operator [MERGEJOIN_100] (rows=45476640 width=15)
+                                    Merge Join Operator [MERGEJOIN_98] (rows=45476640 width=15)
                                       Conds:RS_15._col1=RS_16._col0(Inner),Output:["_col2","_col3","_col7"]
                                     <-Map 9 [SIMPLE_EDGE]
                                       SHUFFLE [RS_16]
@@ -142,7 +142,7 @@ Stage-0
                                     <-Reducer 2 [SIMPLE_EDGE]
                                       SHUFFLE [RS_15]
                                         PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_99] (rows=41342400 width=15)
+                                        Merge Join Operator [MERGEJOIN_97] (rows=41342400 width=15)
                                           Conds:RS_12._col0=RS_13._col0(Inner),Output:["_col1","_col2","_col3"]
                                         <-Map 1 [SIMPLE_EDGE]
                                           SHUFFLE [RS_12]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query42.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query42.q.out b/ql/src/test/results/clientpositive/perf/query42.q.out
index c6c4a3c..7100bb3 100644
--- a/ql/src/test/results/clientpositive/perf/query42.q.out
+++ b/ql/src/test/results/clientpositive/perf/query42.q.out
@@ -15,46 +15,46 @@ Stage-0
     limit:100
     Stage-1
       Reducer 5
-      File Output Operator [FS_24]
-        Limit [LIM_23] (rows=100 width=88)
+      File Output Operator [FS_23]
+        Limit [LIM_22] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_22] (rows=348477374 width=88)
+          Select Operator [SEL_21] (rows=348477374 width=88)
             Output:["_col0","_col1","_col2","_col3"]
           <-Reducer 4 [SIMPLE_EDGE]
-            SHUFFLE [RS_21]
-              Select Operator [SEL_20] (rows=348477374 width=88)
+            SHUFFLE [RS_20]
+              Select Operator [SEL_19] (rows=348477374 width=88)
                 Output:["_col1","_col2","_col3"]
-                Group By Operator [GBY_19] (rows=348477374 width=88)
-                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:1998, KEY._col1, KEY._col2
+                Group By Operator [GBY_18] (rows=348477374 width=88)
+                  Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1
                 <-Reducer 3 [SIMPLE_EDGE]
-                  SHUFFLE [RS_18]
-                    PartitionCols:1998, _col1, _col2
-                    Group By Operator [GBY_17] (rows=696954748 width=88)
-                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col3)"],keys:1998, _col1, _col2
+                  SHUFFLE [RS_17]
+                    PartitionCols:_col0, _col1
+                    Group By Operator [GBY_16] (rows=696954748 width=88)
+                      Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)"],keys:_col7, _col8
                       Select Operator [SEL_15] (rows=696954748 width=88)
-                        Output:["_col1","_col2","_col3"]
-                        Merge Join Operator [MERGEJOIN_34] (rows=696954748 width=88)
+                        Output:["_col7","_col8","_col2"]
+                        Merge Join Operator [MERGEJOIN_33] (rows=696954748 width=88)
                           Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col2","_col7","_col8"]
                         <-Map 7 [SIMPLE_EDGE]
                           SHUFFLE [RS_13]
                             PartitionCols:_col0
                             Select Operator [SEL_8] (rows=231000 width=1436)
                               Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_32] (rows=231000 width=1436)
+                              Filter Operator [FIL_31] (rows=231000 width=1436)
                                 predicate:((i_manager_id = 1) and i_item_sk is not null)
                                 TableScan [TS_6] (rows=462000 width=1436)
                                   default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_category_id","i_category","i_manager_id"]
                         <-Reducer 2 [SIMPLE_EDGE]
                           SHUFFLE [RS_12]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_33] (rows=633595212 width=88)
+                            Merge Join Operator [MERGEJOIN_32] (rows=633595212 width=88)
                               Conds:RS_9._col0=RS_10._col0(Inner),Output:["_col1","_col2"]
                             <-Map 1 [SIMPLE_EDGE]
                               SHUFFLE [RS_9]
                                 PartitionCols:_col0
                                 Select Operator [SEL_2] (rows=575995635 width=88)
                                   Output:["_col0","_col1","_col2"]
-                                  Filter Operator [FIL_30] (rows=575995635 width=88)
+                                  Filter Operator [FIL_29] (rows=575995635 width=88)
                                     predicate:(ss_sold_date_sk is not null and ss_item_sk is not null)
                                     TableScan [TS_0] (rows=575995635 width=88)
                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_ext_sales_price"]
@@ -63,7 +63,7 @@ Stage-0
                                 PartitionCols:_col0
                                 Select Operator [SEL_5] (rows=18262 width=1119)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_31] (rows=18262 width=1119)
+                                  Filter Operator [FIL_30] (rows=18262 width=1119)
                                     predicate:((d_moy = 12) and (d_year = 1998) and d_date_sk is not null)
                                     TableScan [TS_3] (rows=73049 width=1119)
                                       default@date_dim,dt,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query48.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query48.q.out b/ql/src/test/results/clientpositive/perf/query48.q.out
index 0964564..6473ad8 100644
--- a/ql/src/test/results/clientpositive/perf/query48.q.out
+++ b/ql/src/test/results/clientpositive/perf/query48.q.out
@@ -48,11 +48,11 @@ Stage-0
                         <-Map 10 [SIMPLE_EDGE]
                           SHUFFLE [RS_22]
                             PartitionCols:_col0
-                            Select Operator [SEL_14] (rows=5047 width=16)
+                            Select Operator [SEL_14] (rows=395 width=204)
                               Output:["_col0"]
-                              Filter Operator [FIL_54] (rows=5047 width=16)
+                              Filter Operator [FIL_54] (rows=395 width=204)
                                 predicate:((cd_marital_status = 'M') and (cd_education_status = '4 yr Degree') and cd_demo_sk is not null)
-                                TableScan [TS_12] (rows=20191 width=16)
+                                TableScan [TS_12] (rows=1583 width=204)
                                   default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status","cd_education_status"]
                         <-Reducer 6 [SIMPLE_EDGE]
                           SHUFFLE [RS_21]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query52.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query52.q.out b/ql/src/test/results/clientpositive/perf/query52.q.out
index 21f3a39..c61ed30 100644
--- a/ql/src/test/results/clientpositive/perf/query52.q.out
+++ b/ql/src/test/results/clientpositive/perf/query52.q.out
@@ -22,17 +22,17 @@ Stage-0
             Output:["_col0","_col1","_col2","_col3"]
           <-Reducer 4 [SIMPLE_EDGE]
             SHUFFLE [RS_21]
-              Select Operator [SEL_20] (rows=348477374 width=88)
+              Select Operator [SEL_19] (rows=348477374 width=88)
                 Output:["_col1","_col2","_col3"]
-                Group By Operator [GBY_19] (rows=348477374 width=88)
-                  Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)"],keys:1998, KEY._col1, KEY._col2
+                Group By Operator [GBY_18] (rows=348477374 width=88)
+                  Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1
                 <-Reducer 3 [SIMPLE_EDGE]
-                  SHUFFLE [RS_18]
-                    PartitionCols:1998, _col1, _col2
-                    Group By Operator [GBY_17] (rows=696954748 width=88)
-                      Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col3)"],keys:1998, _col1, _col2
+                  SHUFFLE [RS_17]
+                    PartitionCols:_col0, _col1
+                    Group By Operator [GBY_16] (rows=696954748 width=88)
+                      Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)"],keys:_col7, _col8
                       Select Operator [SEL_15] (rows=696954748 width=88)
-                        Output:["_col1","_col2","_col3"]
+                        Output:["_col7","_col8","_col2"]
                         Merge Join Operator [MERGEJOIN_34] (rows=696954748 width=88)
                           Conds:RS_12._col1=RS_13._col0(Inner),Output:["_col2","_col7","_col8"]
                         <-Map 7 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/10423f51/ql/src/test/results/clientpositive/perf/query64.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query64.q.out b/ql/src/test/results/clientpositive/perf/query64.q.out
index 3654f42..5a5ee74 100644
--- a/ql/src/test/results/clientpositive/perf/query64.q.out
+++ b/ql/src/test/results/clientpositive/perf/query64.q.out
@@ -59,25 +59,25 @@ Stage-0
         <-Reducer 5 [SIMPLE_EDGE]
           SHUFFLE [RS_265]
             Select Operator [SEL_264] (rows=331415616 width=88)
-              Output:["_col0","_col1","_col10","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col2","_col20","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
+              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20"]
               Filter Operator [FIL_263] (rows=331415616 width=88)
-                predicate:(_col34 <= _col15)
+                predicate:(_col30 <= _col13)
                 Merge Join Operator [MERGEJOIN_658] (rows=994246850 width=88)
-                  Conds:RS_260._col1, _col2, _col3=RS_261._col1, _col2, _col3(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col15","_col16","_col17","_col18","_col34","_col35","_col36","_col37"]
+                  Conds:RS_260._col1, _col2, _col3=RS_261._col1, _col2, _col3(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col13","_col14","_col15","_col16","_col30","_col31","_col32","_col33"]
                 <-Reducer 4 [SIMPLE_EDGE]
                   SHUFFLE [RS_260]
                     PartitionCols:_col1, _col2, _col3
-                    Select Operator [SEL_129] (rows=903860754 width=88)
-                      Output:["_col0","_col1","_col10","_col11","_col15","_col16","_col17","_col18","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
-                      Group By Operator [GBY_128] (rows=903860754 width=88)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7, KEY._col8, KEY._col9, KEY._col10, KEY._col11, 2000, KEY._col13, KEY._col14
+                    Select Operator [SEL_128] (rows=903860754 width=88)
+                      Output:["_col0","_col1","_col10","_col11","_col13","_col14","_col15","_col16","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
+                      Group By Operator [GBY_127] (rows=903860754 width=88)
+                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7, KEY._col8, KEY._col9, KEY._col10, KEY._col11, KEY._col12, KEY._col13
                       <-Reducer 3 [SIMPLE_EDGE]
-                        SHUFFLE [RS_127]
-                          PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, 2000, _col13, _col14
-                          Group By Operator [GBY_126] (rows=1807721509 width=88)
-                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18"],aggregations:["count()","sum(_col15)","sum(_col16)","sum(_col17)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, 2000, _col13, _col14
+                        SHUFFLE [RS_126]
+                          PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
+                          Group By Operator [GBY_125] (rows=1807721509 width=88)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"],aggregations:["count()","sum(_col26)","sum(_col27)","sum(_col28)"],keys:_col4, _col5, _col6, _col7, _col9, _col10, _col11, _col12, _col40, _col42, _col44, _col45, _col50, _col53
                             Select Operator [SEL_124] (rows=1807721509 width=88)
-                              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col13","_col14","_col15","_col16","_col17"]
+                              Output:["_col4","_col5","_col6","_col7","_col9","_col10","_col11","_col12","_col40","_col42","_col44","_col45","_col50","_col53","_col26","_col27","_col28"]
                               Merge Join Operator [MERGEJOIN_656] (rows=1807721509 width=88)
                                 Conds:RS_121._col0=RS_122._col18(Inner),Output:["_col4","_col5","_col6","_col7","_col9","_col10","_col11","_col12","_col26","_col27","_col28","_col40","_col42","_col44","_col45","_col50","_col53"]
                               <-Reducer 13 [SIMPLE_EDGE]
@@ -364,17 +364,17 @@ Stage-0
                 <-Reducer 45 [SIMPLE_EDGE]
                   SHUFFLE [RS_261]
                     PartitionCols:_col1, _col2, _col3
-                    Select Operator [SEL_259] (rows=903860754 width=88)
-                      Output:["_col1","_col15","_col16","_col17","_col18","_col2","_col3"]
-                      Group By Operator [GBY_258] (rows=903860754 width=88)
-                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7, KEY._col8, KEY._col9, KEY._col10, KEY._col11, 2001, KEY._col13, KEY._col14
+                    Select Operator [SEL_258] (rows=903860754 width=88)
+                      Output:["_col1","_col13","_col14","_col15","_col16","_col2","_col3"]
+                      Group By Operator [GBY_257] (rows=903860754 width=88)
+                        Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7, KEY._col8, KEY._col9, KEY._col10, KEY._col11, KEY._col12, KEY._col13
                       <-Reducer 44 [SIMPLE_EDGE]
-                        SHUFFLE [RS_257]
-                          PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, 2001, _col13, _col14
-                          Group By Operator [GBY_256] (rows=1807721509 width=88)
-                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18"],aggregations:["count()","sum(_col15)","sum(_col16)","sum(_col17)"],keys:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, 2001, _col13, _col14
+                        SHUFFLE [RS_256]
+                          PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
+                          Group By Operator [GBY_255] (rows=1807721509 width=88)
+                            Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"],aggregations:["count()","sum(_col26)","sum(_col27)","sum(_col28)"],keys:_col4, _col5, _col6, _col7, _col9, _col10, _col11, _col12, _col40, _col42, _col44, _col45, _col50, _col53
                             Select Operator [SEL_254] (rows=1807721509 width=88)
-                              Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col13","_col14","_col15","_col16","_col17"]
+                              Output:["_col4","_col5","_col6","_col7","_col9","_col10","_col11","_col12","_col40","_col42","_col44","_col45","_col50","_col53","_col26","_col27","_col28"]
                               Merge Join Operator [MERGEJOIN_657] (rows=1807721509 width=88)
                                 Conds:RS_251._col0=RS_252._col18(Inner),Output:["_col4","_col5","_col6","_col7","_col9","_col10","_col11","_col12","_col26","_col27","_col28","_col40","_col42","_col44","_col45","_col50","_col53"]
                               <-Reducer 43 [SIMPLE_EDGE]