Posted to commits@hive.apache.org by jd...@apache.org on 2016/04/20 00:24:53 UTC

[01/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Repository: hive
Updated Branches:
  refs/heads/llap 0afaa8f6d -> 99cb7f96f


http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vectorization_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_14.q.out b/ql/src/test/results/clientpositive/tez/vectorization_14.q.out
index 43eec22..2a59833 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_14.q.out
@@ -87,7 +87,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToLong(ctinyint) <= cbigint) and ((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1))) and (cdouble < UDFToDouble(ctinyint))) and ((cbigint > -257) or (cfloat < UDFToFloat(cint)))) (type: boolean)
+                    predicate: ((UDFToLong(ctinyint) <= cbigint) and ((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and (cdouble < UDFToDouble(ctinyint)) and ((cbigint > -257) or (cfloat < UDFToFloat(cint)))) (type: boolean)
                     Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28 + cdouble)) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_17.q.out b/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
index 25f6b2a..e812592 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
+                    predicate: ((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257)) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
                     Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_7.q.out b/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
index 165bfbf..f136f07 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
+                    predicate: ((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
                     Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)
@@ -265,7 +265,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
+                    predicate: ((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
                     Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
index f3e31d4..a790b97 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -829,10 +829,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -969,10 +969,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -2228,10 +2228,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -4006,10 +4006,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -5469,10 +5469,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 720 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vectorized_string_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/tez/vectorized_string_funcs.q.out
index 0463d31..bfac939 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_string_funcs.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_string_funcs.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
         TableScan
           alias: alltypesorc
           Filter Operator
-            predicate: ((((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2)) and (cstring1 like '%')) (type: boolean)
+            predicate: (((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2) and (cstring1 like '%')) (type: boolean)
             Select Operator
               expressions: substr(cstring1, 1, 2) (type: string), substr(cstring1, 2) (type: string), lower(cstring1) (type: string), upper(cstring1) (type: string), upper(cstring1) (type: string), length(cstring1) (type: int), trim(cstring1) (type: string), ltrim(cstring1) (type: string), rtrim(cstring1) (type: string), concat(cstring1, cstring2) (type: string), concat('>', cstring1) (type: string), concat(cstring1, '<') (type: string), concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vector_date_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_date_1.q.out b/ql/src/test/results/clientpositive/vector_date_1.q.out
index 9fa061f..da608bf 100644
--- a/ql/src/test/results/clientpositive/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/vector_date_1.q.out
@@ -496,7 +496,7 @@ STAGE PLANS:
             alias: vector_date_1
             Statistics: Num rows: 3 Data size: 224 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((dt1 = dt1) and (dt1 <> dt2)) and (dt1 < dt2)) and (dt1 <= dt2)) and (dt2 > dt1)) and (dt2 >= dt1)) (type: boolean)
+              predicate: ((dt1 = dt1) and (dt1 <> dt2) and (dt1 < dt2) and (dt1 <= dt2) and (dt2 > dt1) and (dt2 >= dt1)) (type: boolean)
               Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: dt1 (type: date), dt2 (type: date)
@@ -603,7 +603,7 @@ STAGE PLANS:
             alias: vector_date_1
             Statistics: Num rows: 3 Data size: 224 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((dt1 = 2001-01-01) and (2001-01-01 = dt1)) and (dt1 <> 1970-01-01)) and (1970-01-01 <> dt1)) and (dt1 > 1970-01-01)) and (dt1 >= 1970-01-01)) and (1970-01-01 < dt1)) and (1970-01-01 <= dt1)) (type: boolean)
+              predicate: ((dt1 = 2001-01-01) and (2001-01-01 = dt1) and (dt1 <> 1970-01-01) and (1970-01-01 <> dt1) and (dt1 > 1970-01-01) and (dt1 >= 1970-01-01) and (1970-01-01 < dt1) and (1970-01-01 <= dt1)) (type: boolean)
               Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: dt2 (type: date)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
index 6f3f92f..aee5e02 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out
@@ -14,7 +14,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((cdouble is not null and cint is not null) and cboolean1 is not null) and ctimestamp1 is not null) (type: boolean)
+              predicate: (cdouble is not null and cint is not null and cboolean1 is not null and ctimestamp1 is not null) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cdouble (type: double), cint (type: int), cboolean1 (type: boolean), ctimestamp1 (type: timestamp), CAST( cdouble AS decimal(20,10)) (type: decimal(20,10)), CAST( cint AS decimal(23,14)) (type: decimal(23,14)), CAST( cboolean1 AS decimal(5,2)) (type: decimal(5,2)), CAST( ctimestamp1 AS decimal(15,0)) (type: decimal(15,0))

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index 3ca326d..03f6f35 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -35,7 +35,7 @@ STAGE PLANS:
             alias: decimal_test
             Statistics: Num rows: 12288 Data size: 2128368 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((cdecimal1 > 0) and (cdecimal1 < 12345.5678)) and (cdecimal2 <> 0)) and (cdecimal2 > 1000)) and cdouble is not null) (type: boolean)
+              predicate: ((cdecimal1 > 0) and (cdecimal1 < 12345.5678) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
               Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (2 * cdecimal2)) (type: decimal(26,14)), ((UDFToDouble(cdecimal1) + 2.34) / UDFToDouble(cdecimal2)) (type: double), (UDFToDouble(cdecimal1) * (UDFToDouble(cdecimal2) / 3.4)) (type: double), (cdecimal1 % 10) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vector_interval_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_2.q.out b/ql/src/test/results/clientpositive/vector_interval_2.q.out
index 8b8cf54..7f40f10 100644
--- a/ql/src/test/results/clientpositive/vector_interval_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_2.q.out
@@ -750,7 +750,7 @@ STAGE PLANS:
             alias: vector_interval_2
             Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((((((((((((CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH))) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH))) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH))) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH))) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH))) and (CAST( str1 AS INTERVAL YEAR TO MONTH) = 1-2)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-3)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-3)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < 1-3)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= 1-2)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > 1-2)) and (1-2 = CAST( str1 AS INTERVAL YEAR TO MONTH))) and (1-2 <> CAST( str2 AS INTERVAL YEAR TO MONTH))) and (1-2 <= CAST( str2 AS INTERVAL 
 YEAR TO MONTH))) and (1-2 < CAST( str2 AS INTERVAL YEAR TO MONTH))) and (1-3 >= CAST( str1 AS INTERVAL YEAR TO MONTH))) and (1-3 > CAST( str1 AS INTERVAL YEAR TO MONTH))) (type: boolean)
+              predicate: ((CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) = 1-2) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < 1-3) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= 1-2) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > 1-2) and (1-2 = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (1-2 <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 < CA
 ST( str2 AS INTERVAL YEAR TO MONTH)) and (1-3 >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and (1-3 > CAST( str1 AS INTERVAL YEAR TO MONTH))) (type: boolean)
               Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ts (type: timestamp)
@@ -895,7 +895,7 @@ STAGE PLANS:
             alias: vector_interval_2
             Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((((((((((((CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND))) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND))) and (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND))) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND))) and (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND))) and (CAST( str3 AS INTERVAL DAY TO SECOND) = 1 02:03:04.000000000)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:05.000000000)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:05.000000000)) and (CAST( str3 AS INTERVAL DAY TO SECOND) < 1 02:03:05.000000000)) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000)) and (CAST( str4 AS INTERVAL DAY TO SECOND) > 1 02:03:04.000000000)) and (1 02:03:04.000000000 = CAST( st
 r3 AS INTERVAL DAY TO SECOND))) and (1 02:03:04.000000000 <> CAST( str4 AS INTERVAL DAY TO SECOND))) and (1 02:03:04.000000000 <= CAST( str4 AS INTERVAL DAY TO SECOND))) and (1 02:03:04.000000000 < CAST( str4 AS INTERVAL DAY TO SECOND))) and (1 02:03:05.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND))) and (1 02:03:05.000000000 > CAST( str3 AS INTERVAL DAY TO SECOND))) (type: boolean)
+              predicate: ((CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) = 1 02:03:04.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) < 1 02:03:05.000000000) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000) and (CAST( str4 AS INTERVAL DAY TO SECOND) > 1 02:03:04.000000000) and (1 02:03:04.000000000 = CAST( str3 AS INTERVAL DAY TO SECON
 D)) and (1 02:03:04.000000000 <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 < CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:05.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND)) and (1 02:03:05.000000000 > CAST( str3 AS INTERVAL DAY TO SECOND))) (type: boolean)
               Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ts (type: timestamp)
@@ -1030,7 +1030,7 @@ STAGE PLANS:
             alias: vector_interval_2
             Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((((((((2002-03-01 = (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 <= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)))) and (2002-03-01 >= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)))) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = 2002-03-01)) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) <= 2002-03-01)) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) >= 2002-03-01)) and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)))) and (2002-03-01 = (dt + 1-2))) and (2002-03-01 <= (dt + 1-2))) and (2002-03-01 >= (dt + 1-2))) and ((dt + 1-2) = 2002-03-01)) and ((dt + 1-2) <= 2002-03-01)) and ((dt + 1-2) >= 2002-03-01)) and (dt <> (dt + 1-2))) (type: boolean)
+              predicate: ((2002-03-01 = (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 <= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 >= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) <= 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) >= 2002-03-01) and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 = (dt + 1-2)) and (2002-03-01 <= (dt + 1-2)) and (2002-03-01 >= (dt + 1-2)) and ((dt + 1-2) = 2002-03-01) and ((dt + 1-2) <= 2002-03-01) and ((dt + 1-2) >= 2002-03-01) and (dt <> (dt + 1-2))) (type: boolean)
               Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ts (type: timestamp)
@@ -1165,7 +1165,7 @@ STAGE PLANS:
             alias: vector_interval_2
             Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((((((((((((2002-03-01 01:02:03.0 = (ts + 1-2)) and (2002-03-01 01:02:03.0 <= (ts + 1-2))) and (2002-03-01 01:02:03.0 >= (ts + 1-2))) and (2002-04-01 01:02:03.0 <> (ts + 1-2))) and (2002-02-01 01:02:03.0 < (ts + 1-2))) and (2002-04-01 01:02:03.0 > (ts + 1-2))) and ((ts + 1-2) = 2002-03-01 01:02:03.0)) and ((ts + 1-2) >= 2002-03-01 01:02:03.0)) and ((ts + 1-2) <= 2002-03-01 01:02:03.0)) and ((ts + 1-2) <> 2002-04-01 01:02:03.0)) and ((ts + 1-2) > 2002-02-01 01:02:03.0)) and ((ts + 1-2) < 2002-04-01 01:02:03.0)) and (ts = (ts + 0-0))) and (ts <> (ts + 1-0))) and (ts <= (ts + 1-0))) and (ts < (ts + 1-0))) and (ts >= (ts - 1-0))) and (ts > (ts - 1-0))) (type: boolean)
+              predicate: ((2002-03-01 01:02:03.0 = (ts + 1-2)) and (2002-03-01 01:02:03.0 <= (ts + 1-2)) and (2002-03-01 01:02:03.0 >= (ts + 1-2)) and (2002-04-01 01:02:03.0 <> (ts + 1-2)) and (2002-02-01 01:02:03.0 < (ts + 1-2)) and (2002-04-01 01:02:03.0 > (ts + 1-2)) and ((ts + 1-2) = 2002-03-01 01:02:03.0) and ((ts + 1-2) >= 2002-03-01 01:02:03.0) and ((ts + 1-2) <= 2002-03-01 01:02:03.0) and ((ts + 1-2) <> 2002-04-01 01:02:03.0) and ((ts + 1-2) > 2002-02-01 01:02:03.0) and ((ts + 1-2) < 2002-04-01 01:02:03.0) and (ts = (ts + 0-0)) and (ts <> (ts + 1-0)) and (ts <= (ts + 1-0)) and (ts < (ts + 1-0)) and (ts >= (ts - 1-0)) and (ts > (ts - 1-0))) (type: boolean)
               Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ts (type: timestamp)
@@ -1312,7 +1312,7 @@ STAGE PLANS:
             alias: vector_interval_2
             Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((((((((((((2001-01-01 01:02:03.0 = (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 <> (dt + 0 01:02:04.000000000))) and (2001-01-01 01:02:03.0 <= (dt + 0 01:02:03.000000000))) and (2001-01-01 01:02:03.0 < (dt + 0 01:02:04.000000000))) and (2001-01-01 01:02:03.0 >= (dt - 0 01:02:03.000000000))) and (2001-01-01 01:02:03.0 > (dt - 0 01:02:04.000000000))) and ((dt + 0 01:02:03.000000000) = 2001-01-01 01:02:03.0)) and ((dt + 0 01:02:04.000000000) <> 2001-01-01 01:02:03.0)) and ((dt + 0 01:02:03.000000000) >= 2001-01-01 01:02:03.0)) and ((dt + 0 01:02:04.000000000) > 2001-01-01 01:02:03.0)) and ((dt - 0 01:02:03.000000000) <= 2001-01-01 01:02:03.0)) and ((dt - 0 01:02:04.000000000) < 2001-01-01 01:02:03.0)) and (ts = (dt + 0 01:02:03.000000000))) and (ts <> (dt + 0 01:02:04.000000000))) and (ts <= (dt + 0 01:02:03.000000000))) and (ts < (dt + 0 01:02:04.000000000))) and (ts >= (dt - 0 01:02:03.000000000))) and (ts > (dt - 0 01:02:04.000000000))) (ty
 pe: boolean)
+              predicate: ((2001-01-01 01:02:03.0 = (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 <> (dt + 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 <= (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 < (dt + 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 >= (dt - 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 > (dt - 0 01:02:04.000000000)) and ((dt + 0 01:02:03.000000000) = 2001-01-01 01:02:03.0) and ((dt + 0 01:02:04.000000000) <> 2001-01-01 01:02:03.0) and ((dt + 0 01:02:03.000000000) >= 2001-01-01 01:02:03.0) and ((dt + 0 01:02:04.000000000) > 2001-01-01 01:02:03.0) and ((dt - 0 01:02:03.000000000) <= 2001-01-01 01:02:03.0) and ((dt - 0 01:02:04.000000000) < 2001-01-01 01:02:03.0) and (ts = (dt + 0 01:02:03.000000000)) and (ts <> (dt + 0 01:02:04.000000000)) and (ts <= (dt + 0 01:02:03.000000000)) and (ts < (dt + 0 01:02:04.000000000)) and (ts >= (dt - 0 01:02:03.000000000)) and (ts > (dt - 0 01:02:04.000000000))) (type: boolean)
               Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ts (type: timestamp)
@@ -1457,7 +1457,7 @@ STAGE PLANS:
             alias: vector_interval_2
             Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((((((((((((2001-01-01 01:02:03.0 = (ts + 0 00:00:00.000000000)) and (2001-01-01 01:02:03.0 <> (ts + 1 00:00:00.000000000))) and (2001-01-01 01:02:03.0 <= (ts + 1 00:00:00.000000000))) and (2001-01-01 01:02:03.0 < (ts + 1 00:00:00.000000000))) and (2001-01-01 01:02:03.0 >= (ts - 1 00:00:00.000000000))) and (2001-01-01 01:02:03.0 > (ts - 1 00:00:00.000000000))) and ((ts + 0 00:00:00.000000000) = 2001-01-01 01:02:03.0)) and ((ts + 1 00:00:00.000000000) <> 2001-01-01 01:02:03.0)) and ((ts + 1 00:00:00.000000000) >= 2001-01-01 01:02:03.0)) and ((ts + 1 00:00:00.000000000) > 2001-01-01 01:02:03.0)) and ((ts - 1 00:00:00.000000000) <= 2001-01-01 01:02:03.0)) and ((ts - 1 00:00:00.000000000) < 2001-01-01 01:02:03.0)) and (ts = (ts + 0 00:00:00.000000000))) and (ts <> (ts + 1 00:00:00.000000000))) and (ts <= (ts + 1 00:00:00.000000000))) and (ts < (ts + 1 00:00:00.000000000))) and (ts >= (ts - 1 00:00:00.000000000))) and (ts > (ts - 1 00:00:00.000000000))) (ty
 pe: boolean)
+              predicate: ((2001-01-01 01:02:03.0 = (ts + 0 00:00:00.000000000)) and (2001-01-01 01:02:03.0 <> (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 <= (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 < (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 >= (ts - 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 > (ts - 1 00:00:00.000000000)) and ((ts + 0 00:00:00.000000000) = 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) <> 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) >= 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) > 2001-01-01 01:02:03.0) and ((ts - 1 00:00:00.000000000) <= 2001-01-01 01:02:03.0) and ((ts - 1 00:00:00.000000000) < 2001-01-01 01:02:03.0) and (ts = (ts + 0 00:00:00.000000000)) and (ts <> (ts + 1 00:00:00.000000000)) and (ts <= (ts + 1 00:00:00.000000000)) and (ts < (ts + 1 00:00:00.000000000)) and (ts >= (ts - 1 00:00:00.000000000)) and (ts > (ts - 1 00:00:00.000000000))) (type: boolean)
               Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ts (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
index 9836538..485e352 100644
--- a/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
@@ -733,7 +733,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
               Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
@@ -2967,7 +2967,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
               Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
@@ -5207,7 +5207,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
               Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
@@ -7459,7 +7459,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
               Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
@@ -9711,7 +9711,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
               Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
@@ -11963,7 +11963,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
               Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
index b99ba4c..20f79c1 100644
--- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
@@ -222,7 +222,7 @@ STAGE PLANS:
             alias: lineitem
             Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+              predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
               Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)
@@ -466,7 +466,7 @@ STAGE PLANS:
             alias: lineitem
             Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean)
+              predicate: ((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null) (type: boolean)
               Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: l_orderkey (type: int)
@@ -489,7 +489,7 @@ STAGE PLANS:
             alias: lineitem
             Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+              predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
               Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vectorization_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_14.q.out b/ql/src/test/results/clientpositive/vectorization_14.q.out
index c085a88..6d4f13a 100644
--- a/ql/src/test/results/clientpositive/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_14.q.out
@@ -81,7 +81,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToLong(ctinyint) <= cbigint) and ((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1))) and (cdouble < UDFToDouble(ctinyint))) and ((cbigint > -257) or (cfloat < UDFToFloat(cint)))) (type: boolean)
+              predicate: ((UDFToLong(ctinyint) <= cbigint) and ((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and (cdouble < UDFToDouble(ctinyint)) and ((cbigint > -257) or (cfloat < UDFToFloat(cint)))) (type: boolean)
               Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28 + cdouble)) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_17.q.out b/ql/src/test/results/clientpositive/vectorization_17.q.out
index f19b778..294451e 100644
--- a/ql/src/test/results/clientpositive/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_17.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
+              predicate: ((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257)) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
               Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vectorization_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_7.q.out b/ql/src/test/results/clientpositive/vectorization_7.q.out
index 9332664..25e7657 100644
--- a/ql/src/test/results/clientpositive/vectorization_7.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_7.q.out
@@ -68,7 +68,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
+              predicate: ((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
               Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)
@@ -251,7 +251,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
+              predicate: ((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
               Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out
index 67a1e62..ca938b0 100644
--- a/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out
@@ -58,7 +58,7 @@ STAGE PLANS:
             alias: alltypesorc
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2)) and (cstring1 like '%')) (type: boolean)
+              predicate: (((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2) and (cstring1 like '%')) (type: boolean)
               Statistics: Num rows: 1024 Data size: 220163 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: substr(cstring1, 1, 2) (type: string), substr(cstring1, 2) (type: string), lower(cstring1) (type: string), upper(cstring1) (type: string), upper(cstring1) (type: string), length(cstring1) (type: int), trim(cstring1) (type: string), ltrim(cstring1) (type: string), rtrim(cstring1) (type: string), concat(cstring1, cstring2) (type: string), concat('>', cstring1) (type: string), concat(cstring1, '<') (type: string), concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) (type: string)
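
The golden-file updates above all show the same effect. Where the old plans printed each conjunction as a left-deep nest of binary ANDs, e.g. (((a and b) and c) and d), the patched genNotNullFilterForJoinSourcePlan merges the predicates into a single multi-AND, e.g. (a and b and c and d). With the conjuncts in one flat list, redundant ones can also be dropped, which is why vector_leftsemi_mapjoin.q.out loses the trailing "key is not null" that is already implied by "(key > 5)". Below is a minimal sketch of that flattening; the Node type and helper names are hypothetical illustrations, not Hive's actual ExprNodeDesc / GenericUDFOPAnd API.

// Minimal sketch of the flattening the updated plans reflect (hypothetical types, not Hive's API).
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FlattenAndSketch {

  static class Node {
    final String op;           // "and" for a conjunction, "leaf" for anything else
    final List<Node> children; // only used when op is "and"
    final String text;         // leaf text, e.g. "(key > 5)"
    Node(String op, List<Node> children, String text) {
      this.op = op; this.children = children; this.text = text;
    }
    static Node and(Node... cs) { return new Node("and", Arrays.asList(cs), null); }
    static Node leaf(String t)  { return new Node("leaf", null, t); }
  }

  // Collect conjuncts from arbitrarily nested binary ANDs into one flat list.
  static List<Node> conjuncts(Node n, List<Node> out) {
    if ("and".equals(n.op)) {
      for (Node c : n.children) conjuncts(c, out);
    } else {
      out.add(n);
    }
    return out;
  }

  public static void main(String[] args) {
    // (((a and b) and c) and d)  ->  one multi-AND over [a, b, c, d]
    Node nested = Node.and(Node.and(Node.and(Node.leaf("a"), Node.leaf("b")), Node.leaf("c")), Node.leaf("d"));
    Node flat = new Node("and", conjuncts(nested, new ArrayList<Node>()), null);
    System.out.println(flat.children.size()); // 4
  }
}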


[46/58] [abbrv] hive git commit: HIVE-13477: Set HivePrivilegeObjectType to TABLE_OR_VIEW (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13477: Set HivePrivilegeObjectType to TABLE_OR_VIEW (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/98699b3b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/98699b3b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/98699b3b

Branch: refs/heads/llap
Commit: 98699b3b7e961630e4da1404fa1c94f61dfd1a61
Parents: a207923
Author: Pengcheng Xiong <px...@apache.org>
Authored: Thu Apr 14 13:17:22 2016 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Thu Apr 14 13:17:22 2016 -0700

----------------------------------------------------------------------
 .../hive/ql/security/authorization/plugin/HivePrivilegeObject.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/98699b3b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
index 180006f..41983f1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java
@@ -151,7 +151,7 @@ public class HivePrivilegeObject implements Comparable<HivePrivilegeObject> {
   }
 
   public HivePrivilegeObject(String dbname, String objectName, List<String> columns) {
-    this(null, dbname, objectName, null, columns, null);
+    this(HivePrivilegeObjectType.TABLE_OR_VIEW, dbname, objectName, null, columns, null);
   }
 
   public HivePrivilegeObject(HivePrivilegeObjectType type, String dbname, String objectName,
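
The one-line change above affects only the column-level convenience constructor: instead of passing a null object type through to the full constructor, it now defaults to HivePrivilegeObjectType.TABLE_OR_VIEW. A rough sketch of the caller-visible effect follows; it assumes the usual getType() accessor, which is not shown in the excerpt above.

// Illustrative only; getType() is assumed, not shown in the diff above.
import java.util.Arrays;
import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;

public class PrivObjectTypeSketch {
  public static void main(String[] args) {
    HivePrivilegeObject obj =
        new HivePrivilegeObject("default", "src", Arrays.asList("key", "value"));
    // Before HIVE-13477 the type passed through as null; with the patch the column-level
    // constructor tags the object as TABLE_OR_VIEW, so authorization plugins no longer
    // have to special-case a null object type for table/view privileges.
    System.out.println(
        obj.getType() == HivePrivilegeObject.HivePrivilegeObjectType.TABLE_OR_VIEW); // true
  }
}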


[35/58] [abbrv] hive git commit: HIVE-13410 : PerfLog metrics scopes not closed if there are exceptions on HS2 (Szehon, reviewed by Aihua Xu)

Posted by jd...@apache.org.
HIVE-13410 : PerfLog metrics scopes not closed if there are exceptions on HS2 (Szehon, reviewed by Aihua Xu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e7f69f07
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e7f69f07
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e7f69f07

Branch: refs/heads/llap
Commit: e7f69f078b981bcf7c36457bedef92b5ae4c6a3d
Parents: 98a7dd8
Author: Szehon Ho <sz...@cloudera.com>
Authored: Wed Apr 13 11:28:52 2016 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Wed Apr 13 11:32:49 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/log/PerfLogger.java   | 51 +++++++++++++++-----
 .../hive/metastore/TestMetaStoreMetrics.java    | 13 +++--
 .../hive/jdbc/miniHS2/TestHs2Metrics.java       | 41 ++++++++++++++--
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  1 +
 4 files changed, 87 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e7f69f07/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
index 8fa5cbf..6f9347c 100644
--- a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
+++ b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java
@@ -21,14 +21,19 @@ package org.apache.hadoop.hive.ql.log;
 import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.hive.common.metrics.common.Metrics;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
+import org.apache.hadoop.hive.common.metrics.common.MetricsScope;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * PerfLogger.
@@ -122,8 +127,8 @@ public class PerfLogger {
     startTimes.put(method, new Long(startTime));
     if (LOG.isDebugEnabled()) {
       LOG.debug("<PERFLOG method=" + method + " from=" + callerName + ">");
-      beginMetrics(method);
     }
+    beginMetrics(method);
   }
   /**
    * Call this function in correspondence of PerfLogBegin to mark the end of the measurement.
@@ -162,9 +167,8 @@ public class PerfLogger {
       }
       sb.append(">");
       LOG.debug(sb.toString());
-
-      endMetrics(method);
     }
+    endMetrics(method);
     return duration;
   }
 
@@ -202,11 +206,24 @@ public class PerfLogger {
     return duration;
   }
 
+
+  public ImmutableMap<String, Long> getStartTimes() {
+    return ImmutableMap.copyOf(startTimes);
+  }
+
+  public ImmutableMap<String, Long> getEndTimes() {
+    return ImmutableMap.copyOf(endTimes);
+  }
+
+  //Methods for metrics integration.  Each thread-local PerfLogger will open/close scope during each perf-log method.
+  Map<String, MetricsScope> openScopes = new HashMap<String, MetricsScope>();
+
   private void beginMetrics(String method) {
     Metrics metrics = MetricsFactory.getInstance();
     try {
       if (metrics != null) {
-        metrics.startStoredScope(method);
+        MetricsScope scope = metrics.createScope(method);
+        openScopes.put(method, scope);
       }
     } catch (IOException e) {
       LOG.warn("Error recording metrics", e);
@@ -217,18 +234,30 @@ public class PerfLogger {
     Metrics metrics = MetricsFactory.getInstance();
     try {
       if (metrics != null) {
-        metrics.endStoredScope(method);
+        MetricsScope scope = openScopes.remove(method);
+        if (scope != null) {
+          metrics.endScope(scope);
+        }
       }
     } catch (IOException e) {
       LOG.warn("Error recording metrics", e);
     }
   }
 
-  public ImmutableMap<String, Long> getStartTimes() {
-    return ImmutableMap.copyOf(startTimes);
-  }
-
-  public ImmutableMap<String, Long> getEndTimes() {
-    return ImmutableMap.copyOf(endTimes);
+  /**
+   * Cleans up any dangling perfLog metric call scopes.
+   */
+  public void cleanupPerfLogMetrics() {
+    Metrics metrics = MetricsFactory.getInstance();
+    try {
+      if (metrics != null) {
+        for (MetricsScope openScope : openScopes.values()) {
+          metrics.endScope(openScope);
+        }
+      }
+    } catch (IOException e) {
+      LOG.warn("Error cleaning up metrics", e);
+    }
+    openScopes.clear();
   }
 }
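
Taken together, the PerfLogger changes above replace the shared startStoredScope/endStoredScope calls with per-logger MetricsScope objects tracked in openScopes, hoist beginMetrics/endMetrics out of the LOG.isDebugEnabled() blocks so scopes are tracked even when debug logging is off, and add cleanupPerfLogMetrics() to end any scope whose matching PerfLogEnd was skipped by an exception. Below is a sketch of the intended usage pattern; it assumes the existing SessionState.getPerfLogger() and PerfLogBegin/PerfLogEnd(callerName, method) API, and the actual one-line Driver.java change is not shown in this excerpt.

// Sketch of the cleanup pattern (assumed API; not the literal Driver.java change).
import org.apache.hadoop.hive.ql.log.PerfLogger;
import org.apache.hadoop.hive.ql.session.SessionState;

public class PerfLogCleanupSketch {
  // Placeholder for work that may throw before PerfLogEnd is reached.
  static void doCompile() throws Exception { /* ... */ }

  public static void run() throws Exception {
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.PerfLogBegin("Driver", PerfLogger.COMPILE);
    try {
      doCompile();
      perfLogger.PerfLogEnd("Driver", PerfLogger.COMPILE);
    } finally {
      // Ends any MetricsScope still tracked in openScopes, so the active_calls_* counters
      // return to zero even on the error path (what the new TestHs2Metrics#testClosedScopes checks).
      perfLogger.cleanupPerfLogMetrics();
    }
  }
}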

http://git-wip-us.apache.org/repos/asf/hive/blob/e7f69f07/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
index ca21bb6..98cb3ec 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
@@ -42,7 +42,6 @@ public class TestMetaStoreMetrics {
 
   @BeforeClass
   public static void before() throws Exception {
-
     int port = MetaStoreUtils.findFreePort();
 
     hiveConf = new HiveConf(TestMetaStoreMetrics.class);
@@ -51,11 +50,16 @@ public class TestMetaStoreMetrics {
     hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_METRICS, true);
     hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
 
+    MetricsFactory.close();
+    MetricsFactory.init(hiveConf);
+    metrics = (CodahaleMetrics) MetricsFactory.getInstance();
+
+    //Increments one HMS connection
     MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
+
+    //Increments one HMS connection (Hive.get())
     SessionState.start(new CliSessionState(hiveConf));
     driver = new Driver(hiveConf);
-
-    metrics = (CodahaleMetrics) MetricsFactory.getInstance();
   }
 
 
@@ -63,8 +67,9 @@ public class TestMetaStoreMetrics {
   public void testMethodCounts() throws Exception {
     driver.run("show databases");
     String json = metrics.dumpJson();
-    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.TIMER, "api_get_all_databases", 1);
 
+    //one call by init, one called here.
+    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.TIMER, "api_get_all_databases", 2);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hive/blob/e7f69f07/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java
index 6a98968..7337e9c 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHs2Metrics.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hive.service.cli.CLIServiceClient;
 import org.apache.hive.service.cli.SessionHandle;
+import org.junit.Assert;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -43,7 +45,6 @@ public class TestHs2Metrics {
 
   private static MiniHS2 miniHS2;
   private static Map<String, String> confOverlay;
-  private static CodahaleMetrics metrics;
 
   //Check metrics during semantic analysis.
   public static class MetricCheckingHook implements HiveSemanticAnalyzerHook {
@@ -79,11 +80,14 @@ public class TestHs2Metrics {
     confOverlay.put(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED.varname, "true");
     confOverlay.put(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     miniHS2.start(confOverlay);
-
-    HiveConf conf = new HiveConf();
+  }
 
 
-    metrics = (CodahaleMetrics) MetricsFactory.getInstance();
+  @Before
+  public void before() throws Exception {
+    HiveConf conf = new HiveConf();
+    MetricsFactory.close();
+    MetricsFactory.init(conf);
   }
 
   @Test
@@ -112,6 +116,35 @@ public class TestHs2Metrics {
     MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, "active_calls_api_compile", 0);
     MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, "active_calls_api_hs2_operation_RUNNING", 0);
     MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, "active_calls_api_hs2_sql_operation_RUNNING", 0);
+
+    serviceClient.closeSession(sessHandle);
+  }
+
+  @Test
+  public void testClosedScopes() throws Exception {
+    CLIServiceClient serviceClient = miniHS2.getServiceClient();
+    SessionHandle sessHandle = serviceClient.openSession("foo", "bar");
+
+    //this should error at analyze scope
+    Exception expectedException = null;
+    try {
+      serviceClient.executeStatement(sessHandle, "select aaa", confOverlay);
+    } catch (Exception e) {
+      expectedException = e;
+    }
+    Assert.assertNotNull("Expected semantic exception", expectedException);
+
+    //verify all scopes were recorded
+    CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance();
+    String json = metrics.dumpJson();
+    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.TIMER, "api_parse", 1);
+    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.TIMER, "api_semanticAnalyze", 1);
+
+    //verify all scopes are closed.
+    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, "active_calls_api_parse", 0);
+    MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, "active_calls_api_semanticAnalyze", 0);
+
+    serviceClient.closeSession(sessHandle);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/e7f69f07/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index abf94ff..bd510d6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -1458,6 +1458,7 @@ public class Driver implements CommandProcessor {
   }
 
   private CommandProcessorResponse createProcessorResponse(int ret) {
+    SessionState.getPerfLogger().cleanupPerfLogMetrics();
     queryDisplay.setErrorMessage(errorMessage);
     return new CommandProcessorResponse(ret, errorMessage, SQLState, downstreamError);
   }
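
Taken together, the PerfLogger and Driver changes above mean that PerfLogBegin now always opens a MetricsScope and records it in the logger's openScopes map, PerfLogEnd removes and closes it, and Driver.createProcessorResponse sweeps up whatever is left so that failed or aborted queries do not leave active_calls_* counters dangling. The following is only a minimal sketch of that begin/end/cleanup pattern, assuming the hive-exec jar is on the classpath; the caller and step names are illustrative, and in the real code the cleanup call sits in createProcessorResponse as shown in the diff above.

    import org.apache.hadoop.hive.ql.log.PerfLogger;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class PerfLogCleanupSketch {
      public static void main(String[] args) {
        PerfLogger perfLogger = SessionState.getPerfLogger();
        // beginMetrics() opens a MetricsScope and parks it in openScopes.
        perfLogger.PerfLogBegin("Driver", "compile");
        try {
          // ... do the measured work; on the happy path PerfLogEnd closes the scope.
          perfLogger.PerfLogEnd("Driver", "compile");
        } finally {
          // If an exception skipped PerfLogEnd, its scope is still open;
          // cleanupPerfLogMetrics() ends every dangling scope so the
          // active_calls_* counters drop back to zero for this query.
          perfLogger.cleanupPerfLogMetrics();
        }
      }
    }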


[21/58] [abbrv] hive git commit: HIVE-13438: Add a service check script for llap (Vikram Dixit K, reviewed by Gunther Hagleitner)

Posted by jd...@apache.org.
HIVE-13438: Add a service check script for llap (Vikram Dixit K, reviewed by Gunther Hagleitner)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/672419d0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/672419d0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/672419d0

Branch: refs/heads/llap
Commit: 672419d0f5a519fa7fc8c506f5793881e95f6a80
Parents: af4be3d
Author: vikram <vi...@hortonworks.com>
Authored: Mon Apr 11 13:29:12 2016 -0700
Committer: vikram <vi...@hortonworks.com>
Committed: Mon Apr 11 13:29:12 2016 -0700

----------------------------------------------------------------------
 llap-server/sql/serviceCheckScript.sql | 12 ++++++++++++
 packaging/src/main/assembly/bin.xml    |  9 +++++++++
 2 files changed, 21 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/672419d0/llap-server/sql/serviceCheckScript.sql
----------------------------------------------------------------------
diff --git a/llap-server/sql/serviceCheckScript.sql b/llap-server/sql/serviceCheckScript.sql
new file mode 100644
index 0000000..0c3d64e
--- /dev/null
+++ b/llap-server/sql/serviceCheckScript.sql
@@ -0,0 +1,12 @@
+set hive.execution.mode=llap;
+set hive.llap.execution.mode=all;
+
+
+CREATE temporary TABLE ${hiveconf:hiveLlapServiceCheck} (name VARCHAR(64), age INT)
+  CLUSTERED BY (age) INTO 2 BUCKETS STORED AS ORC;
+ 
+INSERT INTO TABLE ${hiveconf:hiveLlapServiceCheck}
+  VALUES ('fred flintstone', 35), ('barney rubble', 32);
+ 
+select count(1) from ${hiveconf:hiveLlapServiceCheck};
+

http://git-wip-us.apache.org/repos/asf/hive/blob/672419d0/packaging/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml
index aaf5c0e..97bef59 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -168,6 +168,15 @@
     </fileSet>
 
     <fileSet>
+      <fileMode>666</fileMode>
+      <directory>${project.parent.basedir}/llap-server/sql</directory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+      <outputDirectory>scripts/llap/sql/</outputDirectory>
+    </fileSet>
+
+    <fileSet>
       <directory>${project.parent.basedir}/llap-server/src/main/resources/</directory>
       <includes>
         <include>**/*.py</include>


[15/58] [abbrv] hive git commit: HIVE-13434 : BaseSemanticAnalyzer.unescapeSQLString doesn't unescape \u0000 style character literals. (Kousuke Saruta via Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13434 : BaseSemanticAnalyzer.unescapeSQLString doesn't unescape \u0000 style character literals. (Kousuke Saruta via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0ebd4d17
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0ebd4d17
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0ebd4d17

Branch: refs/heads/llap
Commit: 0ebd4d17ba24809707a68098d05c753a569f62d5
Parents: caafd88
Author: Kousuke Saruta <sa...@oss.nttdata.co.jp>
Authored: Wed Apr 6 07:15:00 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Apr 9 17:36:46 2016 -0700

----------------------------------------------------------------------
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |  4 +--
 .../hive/ql/parse/TestSemanticAnalyzer.java     | 37 ++++++++++++++++++++
 2 files changed, 38 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
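
The fix replaces the decimal multiplier table {1000, 100, 10, 1} with hex accumulation, so a \uXXXX escape is now decoded as a base-16 value. Below is a small standalone illustration of the corrected arithmetic, not code from the patch; it reuses the \u732B example from the new test.

    public class UnescapeSketch {
      public static void main(String[] args) {
        // Decode the four hex digits of "\\u732B" the way the patched loop does.
        String hex = "732B";
        int code = 0;
        for (int j = 0; j < 4; j++) {
          int digit = Character.digit(hex.charAt(j), 16);
          code = (code << 4) + digit;   // same accumulation as the patch
        }
        // code is now 0x732B (29483); the old decimal multipliers would have
        // produced 7*1000 + 3*100 + 2*10 + 11 = 7331, a different character.
        System.out.println((char) code);
      }
    }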


http://git-wip-us.apache.org/repos/asf/hive/blob/0ebd4d17/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index f6ba521..19342a8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -438,8 +438,6 @@ public abstract class BaseSemanticAnalyzer {
     }
   }
 
-  private static final int[] multiplier = new int[] {1000, 100, 10, 1};
-
   @SuppressWarnings("nls")
   public static String unescapeSQLString(String b) {
     Character enclosure = null;
@@ -469,7 +467,7 @@ public abstract class BaseSemanticAnalyzer {
         int base = i + 2;
         for (int j = 0; j < 4; j++) {
           int digit = Character.digit(b.charAt(j + base), 16);
-          code += digit * multiplier[j];
+          code = (code << 4) + digit;
         }
         sb.append((char)code);
         i += 5;

http://git-wip-us.apache.org/repos/asf/hive/blob/0ebd4d17/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java
index be1f7ff..d35fa91 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSemanticAnalyzer.java
@@ -66,4 +66,41 @@ public class TestSemanticAnalyzer {
       assertEquals(result, partSpec.get(colName));
     }
   }
+
+  @Test
+  public void testUnescapeSQLString() {
+    assertEquals("abcdefg", BaseSemanticAnalyzer.unescapeSQLString("\"abcdefg\""));
+
+    // String enclosed by single quotes.
+    assertEquals("C0FFEE", BaseSemanticAnalyzer.unescapeSQLString("\'C0FFEE\'"));
+
+    // Strings including single escaped characters.
+    assertEquals("\u0000", BaseSemanticAnalyzer.unescapeSQLString("'\\0'"));
+    assertEquals("\'", BaseSemanticAnalyzer.unescapeSQLString("\"\\'\""));
+    assertEquals("\"", BaseSemanticAnalyzer.unescapeSQLString("'\\\"'"));
+    assertEquals("\b", BaseSemanticAnalyzer.unescapeSQLString("\"\\b\""));
+    assertEquals("\n", BaseSemanticAnalyzer.unescapeSQLString("'\\n'"));
+    assertEquals("\r", BaseSemanticAnalyzer.unescapeSQLString("\"\\r\""));
+    assertEquals("\t", BaseSemanticAnalyzer.unescapeSQLString("'\\t'"));
+    assertEquals("\u001A", BaseSemanticAnalyzer.unescapeSQLString("\"\\Z\""));
+    assertEquals("\\", BaseSemanticAnalyzer.unescapeSQLString("'\\\\'"));
+    assertEquals("\\%", BaseSemanticAnalyzer.unescapeSQLString("\"\\%\""));
+    assertEquals("\\_", BaseSemanticAnalyzer.unescapeSQLString("'\\_'"));
+
+    // String including '\000' style literal characters.
+    assertEquals("3 + 5 = \u0038", BaseSemanticAnalyzer.unescapeSQLString("'3 + 5 = \\070'"));
+    assertEquals("\u0000", BaseSemanticAnalyzer.unescapeSQLString("\"\\000\""));
+
+    // String including invalid '\000' style literal characters.
+    assertEquals("256", BaseSemanticAnalyzer.unescapeSQLString("\"\\256\""));
+
+    // String including a '\u0000' style literal characters (\u732B is a cat in Kanji).
+    assertEquals("How cute \u732B are",
+      BaseSemanticAnalyzer.unescapeSQLString("\"How cute \\u732B are\""));
+
+    // String including a surrogate pair character
+    // (\uD867\uDE3D is Okhotsk atka mackerel in Kanji).
+    assertEquals("\uD867\uDE3D is a fish",
+      BaseSemanticAnalyzer.unescapeSQLString("\"\\uD867\uDE3D is a fish\""));
+  }
 }


[33/58] [abbrv] hive git commit: Revert "HIVE-13499 - TestJdbcWithMiniHS2.testConcurrentStatements is hanging - Temp patch to disable test"

Posted by jd...@apache.org.
Revert "HIVE-13499 - TestJdbcWithMiniHS2.testConcurrentStatements is hanging - Temp patch to disable test"

This reverts commit 8c182ae10bc03be98ad38defde7518eeadfe9e1b.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b5fe2d2d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b5fe2d2d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b5fe2d2d

Branch: refs/heads/llap
Commit: b5fe2d2dd65ae0a78d4dc9a7c2e8c485e8082a54
Parents: 8c182ae
Author: Thejas Nair <th...@hortonworks.com>
Authored: Wed Apr 13 10:54:07 2016 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Wed Apr 13 10:54:07 2016 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java    | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b5fe2d2d/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 9e3c7e1..10c8ff2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -64,7 +64,6 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestJdbcWithMiniHS2 {
@@ -131,7 +130,6 @@ public class TestJdbcWithMiniHS2 {
     stmt.close();
   }
 
-  @Ignore("Disabling test until hanging issue is resolved.")
   @Test
   public void testConcurrentStatements() throws Exception {
     String tableName = "testConcurrentStatements";


[23/58] [abbrv] hive git commit: HIVE-13439: JDBC: provide a way to retrieve GUID to query Yarn ATS (Vaibhav Gumashta reviewed by Thejas Nair)

Posted by jd...@apache.org.
HIVE-13439: JDBC: provide a way to retrieve GUID to query Yarn ATS (Vaibhav Gumashta reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/58dcd763
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/58dcd763
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/58dcd763

Branch: refs/heads/llap
Commit: 58dcd76331379d180f5a37df60f4511cbc71b89e
Parents: d353216
Author: Vaibhav Gumashta <vg...@hortonworks.com>
Authored: Mon Apr 11 17:55:37 2016 -0700
Committer: Vaibhav Gumashta <vg...@hortonworks.com>
Committed: Mon Apr 11 17:55:37 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   | 78 ++++++++++++++++++++
 .../org/apache/hive/jdbc/HiveStatement.java     | 18 +++++
 2 files changed, 96 insertions(+)
----------------------------------------------------------------------
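
The new HiveStatement#getYarnATSGuid() exposes the Base64-encoded operation GUID so that external tools can look the query up in Yarn ATS. Below is a minimal usage sketch; the connection URL, credentials, and query are placeholders rather than values from the patch, and the GUID is only available once the server has created the operation handle (the test above reads it from a second thread while the query runs).

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    import org.apache.hive.jdbc.HiveStatement;

    public class AtsGuidSketch {
      public static void main(String[] args) throws Exception {
        Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default", "hive", "");
        Statement stmt = con.createStatement();
        stmt.execute("select count(*) from some_table");
        // Cast to HiveStatement; getYarnATSGuid() is not part of java.sql.Statement.
        String guid = ((HiveStatement) stmt).getYarnATSGuid();
        System.out.println("Yarn ATS GUID: " + guid);
        stmt.close();
        con.close();
      }
    }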


http://git-wip-us.apache.org/repos/asf/hive/blob/58dcd763/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 4eaff10..b427dc1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -2343,6 +2343,15 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
   }
 
   /**
+   * Useful for modifying outer class context from anonymous inner class
+   */
+  public static interface Holder<T> {
+    public void set(T obj);
+
+    public T get();
+  }
+
+  /**
    * Test the cancellation of a query that is running.
    * We spawn 2 threads - one running the query and
    * the other attempting to cancel.
@@ -2394,6 +2403,75 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
     stmt.close();
   }
 
+  /**
+   * Test the non-null value of the Yarn ATS GUID.
+   * We spawn 2 threads - one running the query and
+   * the other attempting to read the ATS GUID.
+   * We're using a dummy udf to simulate a query,
+   * that runs for a sufficiently long time.
+   * @throws Exception
+   */
+  @Test
+  public void testYarnATSGuid() throws Exception {
+    String udfName = SleepUDF.class.getName();
+    Statement stmt1 = con.createStatement();
+    stmt1.execute("create temporary function sleepUDF as '" + udfName + "'");
+    stmt1.close();
+    final Statement stmt = con.createStatement();
+    final Holder<Boolean> yarnATSGuidSet = new Holder<Boolean>() {
+      public Boolean b = false;
+
+      public void set(Boolean b) {
+        this.b = b;
+      }
+
+      public Boolean get() {
+        return this.b;
+      }
+    };
+
+    // Thread executing the query
+    Thread tExecute = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          System.out.println("Executing query: ");
+          stmt.executeQuery("select sleepUDF(t1.under_col) as u0, t1.under_col as u1, "
+              + "t2.under_col as u2 from " + tableName + " t1 join " + tableName
+              + " t2 on t1.under_col = t2.under_col");
+        } catch (SQLException e) {
+          // No op
+        }
+      }
+    });
+    // Thread reading the ATS GUID
+    Thread tGuid = new Thread(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          Thread.sleep(10000);
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        }
+        String atsGuid = ((HiveStatement) stmt).getYarnATSGuid();
+        if (atsGuid != null) {
+          yarnATSGuidSet.set(true);
+          System.out.println("Yarn ATS GUID: " + atsGuid);
+        } else {
+          yarnATSGuidSet.set(false);
+        }
+      }
+    });
+    tExecute.start();
+    tGuid.start();
+    tExecute.join();
+    tGuid.join();
+    if (!yarnATSGuidSet.get()) {
+      fail("Failed to set the YARN ATS Guid");
+    }
+    stmt.close();
+  }
+
   // A udf which sleeps for 100ms to simulate a long running query
   public static class SleepUDF extends UDF {
     public Integer evaluate(final Integer value) {

http://git-wip-us.apache.org/repos/asf/hive/blob/58dcd763/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
index b4dba44..0bbd0e3 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java
@@ -18,6 +18,7 @@
 
 package org.apache.hive.jdbc;
 
+import org.apache.commons.codec.binary.Base64;
 import org.apache.hive.service.cli.RowSet;
 import org.apache.hive.service.cli.RowSetFactory;
 import org.apache.hive.service.rpc.thrift.TCLIService;
@@ -856,4 +857,21 @@ public class HiveStatement implements java.sql.Statement {
       return TFetchOrientation.FETCH_FIRST;
     }
   }
+
+  /**
+   * Returns the Yarn ATS GUID.
+   * This method is a public API for usage outside of Hive, although it is not part of the
+   * interface java.sql.Statement.
+   * @return Yarn ATS GUID or null if it hasn't been created yet.
+   */
+  public String getYarnATSGuid() {
+    if (stmtHandle != null) {
+      // Set on the server side.
+      // @see org.apache.hive.service.cli.operation.SQLOperation#prepare
+      String guid64 =
+          Base64.encodeBase64URLSafeString(stmtHandle.getOperationId().getGuid()).trim();
+      return guid64;
+    }
+    return null;
+  }
 }


[54/58] [abbrv] hive git commit: HIVE-13415 : Decouple Sessions from thrift binary transport (Rajat Khandelwal, reviewed by Szehon Ho)

Posted by jd...@apache.org.
HIVE-13415 : Decouple Sessions from thrift binary transport (Rajat Khandelwal, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b30fe72e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b30fe72e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b30fe72e

Branch: refs/heads/llap
Commit: b30fe72e021a4adb83436c4f19b0f400d1f44edf
Parents: 3fec161
Author: Rajat Khandelwal <pr...@apache.org>
Authored: Fri Apr 15 17:16:26 2016 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Fri Apr 15 17:16:26 2016 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../cli/thrift/ThriftBinaryCLIService.java      |  64 ++++++++-
 .../service/cli/thrift/ThriftCLIService.java    |  57 --------
 .../cli/TestRetryingThriftCLIServiceClient.java | 130 +++++++++++++++----
 4 files changed, 171 insertions(+), 82 deletions(-)
----------------------------------------------------------------------
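
The key behavioural knob added here is hive.server2.close.session.on.disconnect (default true). With the default, HiveServer2 keeps the old behaviour and closes a session when its Thrift transport drops; when a session is opened with the property set to false, it survives the disconnect, which is what testSessionLifeAfterTransportClose below verifies. The following is a minimal sketch of opening such a session through a CLIServiceClient; the user name and the surrounding class are illustrative only.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hive.service.cli.CLIServiceClient;
    import org.apache.hive.service.cli.SessionHandle;

    public class DetachableSessionSketch {
      // 'client' can be any CLIServiceClient, e.g. the retrying wrapper used in the test below.
      static SessionHandle openDetachableSession(CLIServiceClient client) throws Exception {
        Map<String, String> conf = new HashMap<>();
        // Ask HS2 not to close this session when its Thrift connection goes away.
        conf.put(HiveConf.ConfVars.HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT.varname, "false");
        return client.openSession("anonymous", "anonymous", conf);
      }
    }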


http://git-wip-us.apache.org/repos/asf/hive/blob/b30fe72e/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index c7e5b33..5cf1609 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2304,6 +2304,8 @@ public class HiveConf extends Configuration {
     HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "6h",
         new TimeValidator(TimeUnit.MILLISECONDS, 3000l, true, null, false),
         "The check interval for session/operation timeout, which can be disabled by setting to zero or negative value."),
+    HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT("hive.server2.close.session.on.disconnect", true,
+      "Session will be closed when connection is closed. Set this to false to have session outlive its parent connection."),
     HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "7d",
         new TimeValidator(TimeUnit.MILLISECONDS),
         "Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value."),

http://git-wip-us.apache.org/repos/asf/hive/blob/b30fe72e/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
index cf575a4..d9c7b2e 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java
@@ -24,16 +24,25 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hive.common.metrics.common.Metrics;
+import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
+import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hive.service.auth.HiveAuthFactory;
 import org.apache.hive.service.cli.CLIService;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.SessionHandle;
 import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup;
 import org.apache.thrift.TProcessorFactory;
 import org.apache.thrift.protocol.TBinaryProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.server.ServerContext;
+import org.apache.thrift.server.TServerEventHandler;
 import org.apache.thrift.server.TThreadPoolServer;
 import org.apache.thrift.transport.TServerSocket;
+import org.apache.thrift.transport.TTransport;
 import org.apache.thrift.transport.TTransportFactory;
 
 
@@ -94,7 +103,60 @@ public class ThriftBinaryCLIService extends ThriftCLIService {
 
       // TCP Server
       server = new TThreadPoolServer(sargs);
-      server.setServerEventHandler(serverEventHandler);
+      server.setServerEventHandler(new TServerEventHandler() {
+        @Override
+        public ServerContext createContext(
+          TProtocol input, TProtocol output) {
+          Metrics metrics = MetricsFactory.getInstance();
+          if (metrics != null) {
+            try {
+              metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
+              metrics.incrementCounter(MetricsConstant.CUMULATIVE_CONNECTION_COUNT);
+            } catch (Exception e) {
+              LOG.warn("Error Reporting JDO operation to Metrics system", e);
+            }
+          }
+          return new ThriftCLIServerContext();
+        }
+
+        @Override
+        public void deleteContext(ServerContext serverContext,
+          TProtocol input, TProtocol output) {
+          Metrics metrics = MetricsFactory.getInstance();
+          if (metrics != null) {
+            try {
+              metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
+            } catch (Exception e) {
+              LOG.warn("Error Reporting JDO operation to Metrics system", e);
+            }
+          }
+          ThriftCLIServerContext context = (ThriftCLIServerContext) serverContext;
+          SessionHandle sessionHandle = context.getSessionHandle();
+          if (sessionHandle != null) {
+            LOG.info("Session disconnected without closing properly. ");
+            try {
+              boolean close = cliService.getSessionManager().getSession(sessionHandle).getHiveConf()
+                .getBoolVar(ConfVars.HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT);
+              LOG.info((close ? "" : "Not ") + "Closing the session: " + sessionHandle);
+              if (close) {
+                cliService.closeSession(sessionHandle);
+              }
+            } catch (HiveSQLException e) {
+              LOG.warn("Failed to close session: " + e, e);
+            }
+          }
+        }
+
+        @Override
+        public void preServe() {
+        }
+
+        @Override
+        public void processContext(ServerContext serverContext,
+          TTransport input, TTransport output) {
+          currentServerContext.set(serverContext);
+        }
+      });
       String msg = "Starting " + ThriftBinaryCLIService.class.getSimpleName() + " on port "
           + portNum + " with " + minWorkerThreads + "..." + maxWorkerThreads + " worker threads";
       LOG.info(msg);

http://git-wip-us.apache.org/repos/asf/hive/blob/b30fe72e/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
index be9833d..e789a38 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
@@ -28,9 +28,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.security.auth.login.LoginException;
 
-import org.apache.hadoop.hive.common.metrics.common.Metrics;
-import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
-import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.common.ServerUtils;
@@ -38,7 +35,6 @@ import org.apache.hadoop.hive.shims.HadoopShims.KerberosNameShim;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hive.service.AbstractService;
 import org.apache.hive.service.ServiceException;
-import org.apache.hive.service.ServiceUtils;
 import org.apache.hive.service.auth.HiveAuthFactory;
 import org.apache.hive.service.auth.TSetIpAddressProcessor;
 import org.apache.hive.service.cli.CLIService;
@@ -97,11 +93,8 @@ import org.apache.hive.service.rpc.thrift.TStatus;
 import org.apache.hive.service.rpc.thrift.TStatusCode;
 import org.apache.hive.service.server.HiveServer2;
 import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TProtocol;
 import org.apache.thrift.server.ServerContext;
 import org.apache.thrift.server.TServer;
-import org.apache.thrift.server.TServerEventHandler;
-import org.apache.thrift.transport.TTransport;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -134,7 +127,6 @@ public abstract class ThriftCLIService extends AbstractService implements TCLISe
   protected int maxWorkerThreads;
   protected long workerKeepAliveTime;
 
-  protected TServerEventHandler serverEventHandler;
   protected ThreadLocal<ServerContext> currentServerContext;
 
   static class ThriftCLIServerContext implements ServerContext {
@@ -153,55 +145,6 @@ public abstract class ThriftCLIService extends AbstractService implements TCLISe
     super(serviceName);
     this.cliService = service;
     currentServerContext = new ThreadLocal<ServerContext>();
-    serverEventHandler = new TServerEventHandler() {
-      @Override
-      public ServerContext createContext(
-          TProtocol input, TProtocol output) {
-        Metrics metrics = MetricsFactory.getInstance();
-        if (metrics != null) {
-          try {
-            metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS);
-            metrics.incrementCounter(MetricsConstant.CUMULATIVE_CONNECTION_COUNT);
-          } catch (Exception e) {
-            LOG.warn("Error Reporting JDO operation to Metrics system", e);
-          }
-        }
-        return new ThriftCLIServerContext();
-      }
-
-      @Override
-      public void deleteContext(ServerContext serverContext,
-          TProtocol input, TProtocol output) {
-        Metrics metrics = MetricsFactory.getInstance();
-        if (metrics != null) {
-          try {
-            metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS);
-          } catch (Exception e) {
-            LOG.warn("Error Reporting JDO operation to Metrics system", e);
-          }
-        }
-        ThriftCLIServerContext context = (ThriftCLIServerContext) serverContext;
-        SessionHandle sessionHandle = context.getSessionHandle();
-        if (sessionHandle != null) {
-          LOG.info("Session disconnected without closing properly, close it now");
-          try {
-            cliService.closeSession(sessionHandle);
-          } catch (HiveSQLException e) {
-            LOG.warn("Failed to close session: " + e, e);
-          }
-        }
-      }
-
-      @Override
-      public void preServe() {
-      }
-
-      @Override
-      public void processContext(ServerContext serverContext,
-          TTransport input, TTransport output) {
-        currentServerContext.set(serverContext);
-      }
-    };
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/b30fe72e/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java b/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java
index 3bd82e6..d36f6c0 100644
--- a/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java
+++ b/service/src/test/org/apache/hive/service/cli/TestRetryingThriftCLIServiceClient.java
@@ -19,13 +19,17 @@
 package org.apache.hive.service.cli;
 
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hive.service.Service;
 import org.apache.hive.service.auth.HiveAuthFactory;
+import org.apache.hive.service.cli.session.HiveSession;
 import org.apache.hive.service.cli.thrift.RetryingThriftCLIServiceClient;
 import org.apache.hive.service.cli.thrift.ThriftCLIService;
 import org.apache.hive.service.server.HiveServer2;
 import org.apache.thrift.TException;
 import org.apache.thrift.transport.TTransport;
 import org.apache.thrift.transport.TTransportException;
+
+import org.junit.Before;
 import org.junit.Test;
 
 import java.lang.reflect.Method;
@@ -41,6 +45,38 @@ import static org.junit.Assert.*;
  */
 public class TestRetryingThriftCLIServiceClient {
   protected static ThriftCLIService service;
+  private HiveConf hiveConf;
+  private HiveServer2 server;
+
+  @Before
+  public void init() {
+    hiveConf = new HiveConf();
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, "localhost");
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 15000);
+    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION, HiveAuthFactory.AuthTypes.NONE.toString());
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE, "binary");
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT, 3);
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT, 3);
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS, 10);
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT, "1s");
+  }
+
+  private void startHiveServer() throws InterruptedException {
+    // Start hive server2
+    server = new HiveServer2();
+    server.init(hiveConf);
+    server.start();
+    Thread.sleep(5000);
+    System.out.println("## HiveServer started");
+  }
+
+  private void stopHiveServer() {
+    if (server != null) {
+      // kill server
+      server.stop();
+    }
+  }
 
   static class RetryingThriftCLIServiceClientTest extends RetryingThriftCLIServiceClient {
     int callCount = 0;
@@ -74,31 +110,14 @@ public class TestRetryingThriftCLIServiceClient {
       return super.connect(conf);
     }
   }
+
   @Test
   public void testRetryBehaviour() throws Exception {
-    // Start hive server2
-    HiveConf hiveConf = new HiveConf();
-    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, "localhost");
-    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 15000);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
-    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION, HiveAuthFactory.AuthTypes.NONE.toString());
-    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE, "binary");
-    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT, 3);
-    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT, 3);
-    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS, 10);
-    hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT, "1s");
-
-    final HiveServer2 server = new HiveServer2();
-    server.init(hiveConf);
-    server.start();
-    Thread.sleep(5000);
-    System.out.println("## HiveServer started");
-
+    startHiveServer();
     // Check if giving invalid address causes retry in connection attempt
     hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 17000);
     try {
-      CLIServiceClient cliServiceClient =
-        RetryingThriftCLIServiceClientTest.newRetryingCLIServiceClient(hiveConf);
+      RetryingThriftCLIServiceClientTest.newRetryingCLIServiceClient(hiveConf);
       fail("Expected to throw exception for invalid port");
     } catch (HiveSQLException sqlExc) {
       assertTrue(sqlExc.getCause() instanceof TTransportException);
@@ -112,16 +131,14 @@ public class TestRetryingThriftCLIServiceClient {
       = RetryingThriftCLIServiceClientTest.newRetryingCLIServiceClient(hiveConf);
     System.out.println("## Created client");
 
-    // kill server
-    server.stop();
+    stopHiveServer();
     Thread.sleep(5000);
 
     // submit few queries
     try {
-      Map<String, String> confOverlay = new HashMap<String, String>();
       RetryingThriftCLIServiceClientTest.handlerInst.callCount = 0;
       RetryingThriftCLIServiceClientTest.handlerInst.connectCount = 0;
-      SessionHandle session = cliServiceClient.openSession("anonymous", "anonymous");
+      cliServiceClient.openSession("anonymous", "anonymous");
     } catch (HiveSQLException exc) {
       exc.printStackTrace();
       assertTrue(exc.getCause() instanceof TException);
@@ -131,4 +148,69 @@ public class TestRetryingThriftCLIServiceClient {
       cliServiceClient.closeTransport();
     }
   }
+
+  @Test
+  public void testTransportClose() throws InterruptedException, HiveSQLException {
+    hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT, 0);
+    try {
+      startHiveServer();
+      RetryingThriftCLIServiceClient.CLIServiceClientWrapper client
+        = RetryingThriftCLIServiceClientTest.newRetryingCLIServiceClient(hiveConf);
+      client.closeTransport();
+      try {
+        client.openSession("anonymous", "anonymous");
+        fail("Shouldn't be able to open session when transport is closed.");
+      } catch(HiveSQLException ignored) {
+
+      }
+    } finally {
+      hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT, 3);
+      stopHiveServer();
+    }
+  }
+
+  @Test
+  public void testSessionLifeAfterTransportClose() throws InterruptedException, HiveSQLException {
+    try {
+      startHiveServer();
+      CLIService service = null;
+      for (Service s : server.getServices()) {
+        if (s instanceof CLIService) {
+          service = (CLIService) s;
+        }
+      }
+      if (service == null) {
+        service = new CLIService(server);
+      }
+      RetryingThriftCLIServiceClient.CLIServiceClientWrapper client
+        = RetryingThriftCLIServiceClientTest.newRetryingCLIServiceClient(hiveConf);
+      Map<String, String> conf = new HashMap<>();
+      conf.put(HiveConf.ConfVars.HIVE_SERVER2_CLOSE_SESSION_ON_DISCONNECT.varname, "false");
+      SessionHandle sessionHandle = client.openSession("anonymous", "anonymous", conf);
+      assertNotNull(sessionHandle);
+      HiveSession session = service.getSessionManager().getSession(sessionHandle);
+      OperationHandle op1 = session.executeStatementAsync("show databases", null);
+      assertNotNull(op1);
+      client.closeTransport();
+      // Verify that session wasn't closed on transport close.
+      assertEquals(session, service.getSessionManager().getSession(sessionHandle));
+      // Should be able to execute without failure in the session whose transport has been closed.
+      OperationHandle op2 = session.executeStatementAsync("show databases", null);
+      assertNotNull(op2);
+      // Make new client, since transport was closed for the last one.
+      client = RetryingThriftCLIServiceClientTest.newRetryingCLIServiceClient(hiveConf);
+      client.closeSession(sessionHandle);
+      // operations will be lost once owning session is closed.
+      for (OperationHandle op: new OperationHandle[]{op1, op2}) {
+        try {
+          client.getOperationStatus(op);
+          fail("Should have failed.");
+        } catch (HiveSQLException ignored) {
+
+        }
+      }
+    } finally {
+      stopHiveServer();
+    }
+  }
 }


[02/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_in.q.out b/ql/src/test/results/clientpositive/spark/subquery_in.q.out
index eb6ff15..5c72d1b 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_in.q.out
@@ -788,7 +788,7 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+                    predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
index 2b7eb47..8b3d353 100644
--- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
@@ -100,7 +100,7 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+                    predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)
@@ -202,7 +202,7 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean)
+                    predicate: ((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int)
@@ -262,7 +262,7 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+                    predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_14.q.out b/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
index c590173..cb3d9a4 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_14.q.out
@@ -86,7 +86,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToLong(ctinyint) <= cbigint) and ((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1))) and (cdouble < UDFToDouble(ctinyint))) and ((cbigint > -257) or (cfloat < UDFToFloat(cint)))) (type: boolean)
+                    predicate: ((UDFToLong(ctinyint) <= cbigint) and ((UDFToDouble(cint) <= cdouble) or (ctimestamp2 < ctimestamp1)) and (cdouble < UDFToDouble(ctinyint)) and ((cbigint > -257) or (cfloat < UDFToFloat(cint)))) (type: boolean)
                     Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string), cboolean1 (type: boolean), cdouble (type: double), (- (-26.28 + cdouble)) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
index 2105317..799b19f 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
@@ -67,7 +67,7 @@ STAGE PLANS:
                   alias: alltypesorc
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
+                    predicate: ((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257)) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
                     Statistics: Num rows: 4778 Data size: 146682 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out
index 0463d31..bfac939 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
         TableScan
           alias: alltypesorc
           Filter Operator
-            predicate: ((((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2)) and (cstring1 like '%')) (type: boolean)
+            predicate: (((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2) and (cstring1 like '%')) (type: boolean)
             Select Operator
               expressions: substr(cstring1, 1, 2) (type: string), substr(cstring1, 2) (type: string), lower(cstring1) (type: string), upper(cstring1) (type: string), upper(cstring1) (type: string), length(cstring1) (type: int), trim(cstring1) (type: string), ltrim(cstring1) (type: string), rtrim(cstring1) (type: string), concat(cstring1, cstring2) (type: string), concat('>', cstring1) (type: string), concat(cstring1, '<') (type: string), concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_in.q.out b/ql/src/test/results/clientpositive/subquery_in.q.out
index 58f5618..20c5538 100644
--- a/ql/src/test/results/clientpositive/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/subquery_in.q.out
@@ -793,7 +793,7 @@ STAGE PLANS:
             alias: lineitem
             Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+              predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
               Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/bucketpruning1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/bucketpruning1.q.out b/ql/src/test/results/clientpositive/tez/bucketpruning1.q.out
index 3557a3b..78df581 100644
--- a/ql/src/test/results/clientpositive/tez/bucketpruning1.q.out
+++ b/ql/src/test/results/clientpositive/tez/bucketpruning1.q.out
@@ -563,13 +563,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: (((key = 1) and (ds = '2008-04-08')) and (value = 'One')) (type: boolean)
+                  filterExpr: ((key = 1) and (ds = '2008-04-08') and (value = 'One')) (type: boolean)
                   buckets included: [1,] of 16
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((key = 1) and (ds = '2008-04-08')) and (value = 'One')) (type: boolean)
+                    predicate: ((key = 1) and (ds = '2008-04-08') and (value = 'One')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: 1 (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -654,13 +654,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: (((value = 'One') and (key = 1)) and (ds = '2008-04-08')) (type: boolean)
+                  filterExpr: ((value = 'One') and (key = 1) and (ds = '2008-04-08')) (type: boolean)
                   buckets included: [1,] of 16
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((value = 'One') and (key = 1)) and (ds = '2008-04-08')) (type: boolean)
+                    predicate: ((value = 'One') and (key = 1) and (ds = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: 1 (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -918,13 +918,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: (((key) IN (2, 3) and (ds = '2008-04-08')) and (value = 'One')) (type: boolean)
+                  filterExpr: ((key) IN (2, 3) and (ds = '2008-04-08') and (value = 'One')) (type: boolean)
                   buckets included: [2,3,] of 16
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((key) IN (2, 3) and (ds = '2008-04-08')) and (value = 'One')) (type: boolean)
+                    predicate: ((key) IN (2, 3) and (ds = '2008-04-08') and (value = 'One')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -1011,13 +1011,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: (((key) IN (2, 3) and (value = 'One')) and (ds = '2008-04-08')) (type: boolean)
+                  filterExpr: ((key) IN (2, 3) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
                   buckets included: [2,3,] of 16
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((key) IN (2, 3) and (value = 'One')) and (ds = '2008-04-08')) (type: boolean)
+                    predicate: ((key) IN (2, 3) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -1197,12 +1197,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: ((((key = 1) or (key = 2)) and (value = 'One')) and (ds = '2008-04-08')) (type: boolean)
+                  filterExpr: (((key = 1) or (key = 2)) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((((key = 1) or (key = 2)) and (value = 'One')) and (ds = '2008-04-08')) (type: boolean)
+                    predicate: (((key = 1) or (key = 2)) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -1593,12 +1593,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: (((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (ds = '2008-04-08')) and (value = 'One')) (type: boolean)
+                  filterExpr: ((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (ds = '2008-04-08') and (value = 'One')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (ds = '2008-04-08')) and (value = 'One')) (type: boolean)
+                    predicate: ((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (ds = '2008-04-08') and (value = 'One')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -1700,12 +1700,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: (((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (value = 'One')) and (ds = '2008-04-08')) (type: boolean)
+                  filterExpr: ((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (value = 'One')) and (ds = '2008-04-08')) (type: boolean)
+                    predicate: ((key) IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) and (value = 'One') and (ds = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int), 'One' (type: string), '2008-04-08' (type: string)
@@ -1889,12 +1889,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcbucket_pruned
-                  filterExpr: (((key = 1) and (ds = '2008-04-08')) and ((value = 'One') or (value = 'Two'))) (type: boolean)
+                  filterExpr: ((key = 1) and (ds = '2008-04-08') and ((value = 'One') or (value = 'Two'))) (type: boolean)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((key = 1) and (ds = '2008-04-08')) and ((value = 'One') or (value = 'Two'))) (type: boolean)
+                    predicate: ((key = 1) and (ds = '2008-04-08') and ((value = 'One') or (value = 'Two'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: 1 (type: int), value (type: string), '2008-04-08' (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out b/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out
index 8fecbd7..636410a 100644
--- a/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/constprog_semijoin.q.out
@@ -146,7 +146,7 @@ Stage-0
                   Select Operator [SEL_2] (rows=5 width=20)
                     Output:["_col0","_col2"]
                     Filter Operator [FIL_24] (rows=5 width=20)
-                      predicate:(((val = 't1val01') and id is not null) and dimid is not null)
+                      predicate:((val = 't1val01') and id is not null and dimid is not null)
                       TableScan [TS_0] (rows=10 width=20)
                         default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","dimid"]
               <-Map 4 [SIMPLE_EDGE]
@@ -216,7 +216,7 @@ Stage-0
                   Select Operator [SEL_2] (rows=5 width=20)
                     Output:["_col0","_col2"]
                     Filter Operator [FIL_24] (rows=5 width=20)
-                      predicate:(((val = 't1val01') and dimid is not null) and id is not null)
+                      predicate:((val = 't1val01') and dimid is not null and id is not null)
                       TableScan [TS_0] (rows=10 width=20)
                         default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","dimid"]
               <-Map 4 [SIMPLE_EDGE]
@@ -269,7 +269,7 @@ Stage-0
             Select Operator [SEL_2] (rows=5 width=20)
               Output:["_col0","_col1","_col2","_col3"]
               Filter Operator [FIL_15] (rows=5 width=20)
-                predicate:((((dimid = 100) = true) and (dimid <> 100)) and (dimid = 100) is not null)
+                predicate:(((dimid = 100) = true) and (dimid <> 100) and (dimid = 100) is not null)
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
@@ -280,7 +280,7 @@ Stage-0
               Select Operator [SEL_5] (rows=2 width=3)
                 Output:["_col0","_col1"]
                 Filter Operator [FIL_17] (rows=2 width=3)
-                  predicate:((((id = 100) = true) and (id <> 100)) and (id = 100) is not null)
+                  predicate:(((id = 100) = true) and (id <> 100) and (id = 100) is not null)
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 
@@ -317,7 +317,7 @@ Stage-0
             Select Operator [SEL_2] (rows=2 width=20)
               Output:["_col0","_col1","_col2","_col3"]
               Filter Operator [FIL_15] (rows=2 width=20)
-                predicate:(((dimid) IN (100, 200) and ((dimid = 100) = true)) and (dimid = 100) is not null)
+                predicate:((dimid) IN (100, 200) and ((dimid = 100) = true) and (dimid = 100) is not null)
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
@@ -328,7 +328,7 @@ Stage-0
               Select Operator [SEL_5] (rows=1 width=3)
                 Output:["_col0","_col1"]
                 Filter Operator [FIL_17] (rows=1 width=3)
-                  predicate:(((id) IN (100, 200) and ((id = 100) = true)) and (id = 100) is not null)
+                  predicate:((id) IN (100, 200) and ((id = 100) = true) and (id = 100) is not null)
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 
@@ -367,7 +367,7 @@ Stage-0
             Select Operator [SEL_2] (rows=2 width=20)
               Output:["_col0","_col1","_col2"]
               Filter Operator [FIL_15] (rows=2 width=20)
-                predicate:((((dimid = 100) = true) and (dimid = 200)) and (dimid = 100) is not null)
+                predicate:(((dimid = 100) = true) and (dimid = 200) and (dimid = 100) is not null)
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
@@ -377,7 +377,7 @@ Stage-0
               Output:["_col0","_col1"],keys:200, false
               Select Operator [SEL_5] (rows=1 width=3)
                 Filter Operator [FIL_17] (rows=1 width=3)
-                  predicate:((((id = 100) = true) and (id = 200)) and (id = 100) is not null)
+                  predicate:(((id = 100) = true) and (id = 200) and (id = 100) is not null)
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 
@@ -414,7 +414,7 @@ Stage-0
             Select Operator [SEL_2] (rows=2 width=20)
               Output:["_col0","_col1","_col2"]
               Filter Operator [FIL_15] (rows=2 width=20)
-                predicate:((((dimid = 100) = true) and (dimid = 100)) and (dimid = 100) is not null)
+                predicate:(((dimid = 100) = true) and (dimid = 100) and (dimid = 100) is not null)
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
@@ -424,7 +424,7 @@ Stage-0
               Output:["_col0","_col1"],keys:100, true
               Select Operator [SEL_5] (rows=1 width=3)
                 Filter Operator [FIL_17] (rows=1 width=3)
-                  predicate:((((id = 100) = true) and (id = 100)) and (id = 100) is not null)
+                  predicate:(((id = 100) = true) and (id = 100) and (id = 100) is not null)
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 
@@ -463,7 +463,7 @@ Stage-0
             Select Operator [SEL_2] (rows=5 width=20)
               Output:["_col0","_col1","_col2","_col3"]
               Filter Operator [FIL_15] (rows=5 width=20)
-                predicate:((((dimid = 100) = true) and dimid is not null) and (dimid = 100) is not null)
+                predicate:(((dimid = 100) = true) and dimid is not null and (dimid = 100) is not null)
                 TableScan [TS_0] (rows=10 width=20)
                   default@table1,table1,Tbl:COMPLETE,Col:NONE,Output:["id","val","val1","dimid"]
         <-Map 3 [SIMPLE_EDGE]
@@ -474,7 +474,7 @@ Stage-0
               Select Operator [SEL_5] (rows=2 width=3)
                 Output:["_col0","_col1"]
                 Filter Operator [FIL_17] (rows=2 width=3)
-                  predicate:((((id = 100) = true) and id is not null) and (id = 100) is not null)
+                  predicate:(((id = 100) = true) and id is not null and (id = 100) is not null)
                   TableScan [TS_3] (rows=5 width=3)
                     default@table3,table3,Tbl:COMPLETE,Col:NONE,Output:["id"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
index 159415d..2626768 100644
--- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
@@ -1051,10 +1051,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -1189,10 +1189,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -2428,10 +2428,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -4297,10 +4297,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -5734,10 +5734,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 54 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
index 5292106..0bf92ef 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
@@ -2846,7 +2846,7 @@ STAGE PLANS:
                   alias: over1k
                   Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((i = 100) and (t = 27)) and (s = 'foo')) (type: boolean)
+                    predicate: ((i = 100) and (t = 27) and (s = 'foo')) (type: boolean)
                     Statistics: Num rows: 493 Data size: 13312 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27 (type: tinyint), 100 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index 0eb9132..c70f104 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -429,7 +429,7 @@ Stage-0
                                               Group By Operator [GBY_14] (rows=2 width=101)
                                                 Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                                 Filter Operator [FIL_49] (rows=5 width=74)
-                                                  predicate:((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and key is not null)
+                                                  predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and key is not null)
                                                   TableScan [TS_11] (rows=20 width=83)
                                                     default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
                                 <-Reducer 3 [SIMPLE_EDGE]
@@ -449,7 +449,7 @@ Stage-0
                                               Group By Operator [GBY_3] (rows=2 width=101)
                                                 Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                                 Filter Operator [FIL_48] (rows=5 width=74)
-                                                  predicate:((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and key is not null)
+                                                  predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and key is not null)
                                                   TableScan [TS_0] (rows=20 width=83)
                                                     default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
 
@@ -528,7 +528,7 @@ Stage-0
                                               Group By Operator [GBY_3] (rows=1 width=101)
                                                 Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                                 Filter Operator [FIL_44] (rows=1 width=93)
-                                                  predicate:(((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) and key is not null)
+                                                  predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and key is not null)
                                                   TableScan [TS_0] (rows=20 width=83)
                                                     default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
                                 <-Reducer 9 [SIMPLE_EDGE]
@@ -544,7 +544,7 @@ Stage-0
                                           Group By Operator [GBY_14] (rows=1 width=93)
                                             Output:["_col0","_col1","_col2"],keys:key, c_int, c_float
                                             Filter Operator [FIL_45] (rows=1 width=93)
-                                              predicate:(((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) and key is not null)
+                                              predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and key is not null)
                                               TableScan [TS_11] (rows=20 width=83)
                                                 default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
 
@@ -606,7 +606,7 @@ Stage-0
                                   Group By Operator [GBY_3] (rows=1 width=101)
                                     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                     Filter Operator [FIL_34] (rows=1 width=93)
-                                      predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0))
+                                      predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0))
                                       TableScan [TS_0] (rows=20 width=83)
                                         default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
                     <-Reducer 7 [SIMPLE_EDGE]
@@ -622,7 +622,7 @@ Stage-0
                               Group By Operator [GBY_14] (rows=1 width=93)
                                 Output:["_col0","_col1","_col2"],keys:key, c_int, c_float
                                 Filter Operator [FIL_35] (rows=1 width=93)
-                                  predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0))
+                                  predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0))
                                   TableScan [TS_11] (rows=20 width=83)
                                     default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
 
@@ -692,7 +692,7 @@ Stage-0
                                       Group By Operator [GBY_3] (rows=1 width=101)
                                         Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                         Filter Operator [FIL_39] (rows=1 width=93)
-                                          predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0))
+                                          predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0))
                                           TableScan [TS_0] (rows=20 width=83)
                                             default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
                         <-Reducer 9 [SIMPLE_EDGE]
@@ -712,7 +712,7 @@ Stage-0
                                       Group By Operator [GBY_14] (rows=1 width=101)
                                         Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                         Filter Operator [FIL_40] (rows=1 width=93)
-                                          predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0))
+                                          predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0))
                                           TableScan [TS_11] (rows=20 width=83)
                                             default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
 
@@ -781,7 +781,7 @@ Stage-0
                                       Group By Operator [GBY_3] (rows=1 width=101)
                                         Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                         Filter Operator [FIL_39] (rows=1 width=93)
-                                          predicate:(((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) and key is not null)
+                                          predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and key is not null)
                                           TableScan [TS_0] (rows=20 width=83)
                                             default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
                             <-Reducer 7 [SIMPLE_EDGE]
@@ -797,7 +797,7 @@ Stage-0
                                       Group By Operator [GBY_11] (rows=1 width=93)
                                         Output:["_col0","_col1","_col2"],keys:key, c_int, c_float
                                         Filter Operator [FIL_40] (rows=1 width=93)
-                                          predicate:(((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) and key is not null)
+                                          predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and key is not null)
                                           TableScan [TS_8] (rows=20 width=83)
                                             default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
 
@@ -1229,7 +1229,7 @@ Stage-0
                       Select Operator [SEL_2] (rows=9 width=82)
                         Output:["_col0","_col1","_col2"]
                         Filter Operator [FIL_24] (rows=9 width=82)
-                          predicate:((((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0))) and key is not null)
+                          predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0)) and key is not null)
                           TableScan [TS_0] (rows=20 width=83)
                             default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
                   <-Map 4 [SIMPLE_EDGE]
@@ -1238,7 +1238,7 @@ Stage-0
                       Select Operator [SEL_5] (rows=9 width=79)
                         Output:["_col0","_col1"]
                         Filter Operator [FIL_25] (rows=9 width=82)
-                          predicate:((((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0))) and key is not null)
+                          predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0)) and key is not null)
                           TableScan [TS_3] (rows=20 width=83)
                             default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
 
@@ -1585,7 +1585,7 @@ Stage-0
               Select Operator [SEL_2] (rows=9 width=82)
                 Output:["_col0","_col1"]
                 Filter Operator [FIL_15] (rows=9 width=82)
-                  predicate:((((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0))) and key is not null)
+                  predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0)) and key is not null)
                   TableScan [TS_0] (rows=20 width=83)
                     default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
           <-Map 3 [SIMPLE_EDGE]
@@ -1625,7 +1625,7 @@ Stage-0
               Select Operator [SEL_2] (rows=9 width=82)
                 Output:["_col0","_col1","_col2"]
                 Filter Operator [FIL_25] (rows=9 width=82)
-                  predicate:((((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0))) and key is not null)
+                  predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0)) and key is not null)
                   TableScan [TS_0] (rows=20 width=83)
                     default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
           <-Map 3 [SIMPLE_EDGE]
@@ -1636,7 +1636,7 @@ Stage-0
                 Select Operator [SEL_5] (rows=9 width=75)
                   Output:["_col0"]
                   Filter Operator [FIL_26] (rows=9 width=82)
-                    predicate:((((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0))) and key is not null)
+                    predicate:(((c_int + 1) = 2) and ((c_int > 0) or (c_float >= 0.0)) and key is not null)
                     TableScan [TS_3] (rows=20 width=83)
                       default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
           <-Map 4 [SIMPLE_EDGE]
@@ -1717,7 +1717,7 @@ Stage-0
                                     Group By Operator [GBY_3] (rows=1 width=101)
                                       Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                       Filter Operator [FIL_48] (rows=1 width=93)
-                                        predicate:(((((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) and (((c_int + 1) + 1) >= 0)) and (((c_int + 1) > 0) or (UDFToDouble(key) >= 0.0))) and (UDFToDouble(key) > 0.0))
+                                        predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and (((c_int + 1) + 1) >= 0) and (((c_int + 1) > 0) or (UDFToDouble(key) >= 0.0)) and (UDFToDouble(key) > 0.0))
                                         TableScan [TS_0] (rows=20 width=83)
                                           default@cbo_t1,cbo_t1,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
                   <-Reducer 9 [SIMPLE_EDGE]
@@ -1739,7 +1739,7 @@ Stage-0
                                   Group By Operator [GBY_15] (rows=1 width=101)
                                     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(c_int)"],keys:key, c_int, c_float
                                     Filter Operator [FIL_49] (rows=1 width=93)
-                                      predicate:(((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) and (UDFToDouble(key) > 0.0))
+                                      predicate:(((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0)) and (c_float > 0.0) and ((c_int >= 1) or (c_float >= 1.0)) and ((UDFToFloat(c_int) + c_float) >= 0.0) and (UDFToDouble(key) > 0.0))
                                       TableScan [TS_12] (rows=20 width=83)
                                         default@cbo_t2,cbo_t2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int","c_float"]
 
@@ -2135,7 +2135,7 @@ Stage-0
                   Select Operator [SEL_2] (rows=17 width=16)
                     Output:["_col0","_col1","_col2"]
                     Filter Operator [FIL_28] (rows=17 width=16)
-                      predicate:(((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null)
+                      predicate:((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null)
                       TableScan [TS_0] (rows=100 width=16)
                         default@lineitem,lineitem,Tbl:COMPLETE,Col:COMPLETE,Output:["l_orderkey","l_partkey","l_suppkey","l_linenumber"]
               <-Map 4 [SIMPLE_EDGE]
@@ -2146,7 +2146,7 @@ Stage-0
                     Select Operator [SEL_5] (rows=14 width=4)
                       Output:["_col0"]
                       Filter Operator [FIL_29] (rows=14 width=96)
-                        predicate:(((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null)
+                        predicate:((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null)
                         TableScan [TS_3] (rows=100 width=96)
                           default@lineitem,lineitem,Tbl:COMPLETE,Col:COMPLETE,Output:["l_orderkey","l_linenumber","l_shipmode"]
           <-Reducer 6 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
index db1c5b5..5530660 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
@@ -367,7 +367,7 @@ Stage-0
                                               Select Operator [SEL_11] (rows=42 width=34)
                                                 Output:["_col0","_col1","_col2","_col3","_col4"]
                                                 Filter Operator [FIL_86] (rows=42 width=34)
-                                                  predicate:((((((v3 = 'ssv3') and k2 is not null) and k3 is not null) and k1 is not null) and v1 is not null) and v2 is not null)
+                                                  predicate:((v3 = 'ssv3') and k2 is not null and k3 is not null and k1 is not null and v1 is not null and v2 is not null)
                                                   TableScan [TS_9] (rows=85 width=34)
                                                     default@ss,ss,Tbl:COMPLETE,Col:NONE,Output:["k1","v1","k2","v2","k3","v3"]
                                           <-Map 7 [SIMPLE_EDGE]
@@ -390,7 +390,7 @@ Stage-0
                                       Select Operator [SEL_20] (rows=42 width=34)
                                         Output:["_col0","_col2","_col3","_col4","_col5"]
                                         Filter Operator [FIL_89] (rows=42 width=34)
-                                          predicate:((((((v1 = 'srv1') and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) and k1 is not null)
+                                          predicate:((v1 = 'srv1') and k2 is not null and k3 is not null and v2 is not null and v3 is not null and k1 is not null)
                                           TableScan [TS_18] (rows=85 width=34)
                                             default@sr,sr,Tbl:COMPLETE,Col:NONE,Output:["k1","v1","k2","v2","k3","v3"]
                                   <-Map 17 [SIMPLE_EDGE]
@@ -413,7 +413,7 @@ Stage-0
                                 Select Operator [SEL_2] (rows=170 width=34)
                                   Output:["_col0","_col1","_col2","_col3"]
                                   Filter Operator [FIL_83] (rows=170 width=34)
-                                    predicate:((v2 is not null and v3 is not null) and k1 is not null)
+                                    predicate:(v2 is not null and v3 is not null and k1 is not null)
                                     TableScan [TS_0] (rows=170 width=34)
                                       default@cs,cs,Tbl:COMPLETE,Col:NONE,Output:["k1","v2","k3","v3"]
                             <-Map 6 [SIMPLE_EDGE]
@@ -1060,7 +1060,7 @@ Stage-0
                                 Select Operator [SEL_2] (rows=170 width=34)
                                   Output:["_col0","_col1","_col2","_col3"]
                                   Filter Operator [FIL_83] (rows=170 width=34)
-                                    predicate:((v2 is not null and v3 is not null) and k1 is not null)
+                                    predicate:(v2 is not null and v3 is not null and k1 is not null)
                                     TableScan [TS_0] (rows=170 width=34)
                                       default@cs,cs,Tbl:COMPLETE,Col:NONE,Output:["k1","v2","k3","v3"]
                             <-Select Operator [SEL_5] (rows=250 width=10)
@@ -1084,7 +1084,7 @@ Stage-0
                                     Select Operator [SEL_20] (rows=42 width=34)
                                       Output:["_col0","_col2","_col3","_col4","_col5"]
                                       Filter Operator [FIL_89] (rows=42 width=34)
-                                        predicate:((((((v1 = 'srv1') and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) and k1 is not null)
+                                        predicate:((v1 = 'srv1') and k2 is not null and k3 is not null and v2 is not null and v3 is not null and k1 is not null)
                                         TableScan [TS_18] (rows=85 width=34)
                                           default@sr,sr,Tbl:COMPLETE,Col:NONE,Output:["k1","v1","k2","v2","k3","v3"]
                                 <-Select Operator [SEL_23] (rows=250 width=10)
@@ -1123,7 +1123,7 @@ Stage-0
                                       Select Operator [SEL_11] (rows=42 width=34)
                                         Output:["_col0","_col1","_col2","_col3","_col4"]
                                         Filter Operator [FIL_86] (rows=42 width=34)
-                                          predicate:((((((v3 = 'ssv3') and k2 is not null) and k3 is not null) and k1 is not null) and v1 is not null) and v2 is not null)
+                                          predicate:((v3 = 'ssv3') and k2 is not null and k3 is not null and k1 is not null and v1 is not null and v2 is not null)
                                           TableScan [TS_9] (rows=85 width=34)
                                             default@ss,ss,Tbl:COMPLETE,Col:NONE,Output:["k1","v1","k2","v2","k3","v3"]
                                   <-Select Operator [SEL_8] (rows=1000 width=10)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_2.q.out b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_2.q.out
index d44d763..e233052 100644
--- a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_2.q.out
@@ -1104,7 +1104,7 @@ STAGE PLANS:
                   alias: x
                   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key is not null and value is not null) and (value < 'zzzzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+                    predicate: ((value < 'zzzzzzzzzz') and (key < 'zzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1160,7 +1160,7 @@ STAGE PLANS:
                   alias: y1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and (value < 'zzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+                    predicate: ((value < 'zzzzzzzz') and (key < 'zzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1173,7 +1173,7 @@ STAGE PLANS:
                   alias: z2
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((value is not null and (key < 'zzzzzzzzzz')) and (value < 'zzzzzzzzzz')) (type: boolean)
+                    predicate: ((key < 'zzzzzzzzzz') and (value < 'zzzzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string)
@@ -1288,7 +1288,7 @@ STAGE PLANS:
                   alias: x
                   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key is not null and value is not null) and (value < 'zzzzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+                    predicate: ((value < 'zzzzzzzzzz') and (key < 'zzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1346,7 +1346,7 @@ STAGE PLANS:
                   alias: y1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and (value < 'zzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+                    predicate: ((value < 'zzzzzzzz') and (key < 'zzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1359,7 +1359,7 @@ STAGE PLANS:
                   alias: z2
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((value is not null and (key < 'zzzzzzzzzz')) and (value < 'zzzzzzzzzz')) (type: boolean)
+                    predicate: ((key < 'zzzzzzzzzz') and (value < 'zzzzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/subquery_in.q.out b/ql/src/test/results/clientpositive/tez/subquery_in.q.out
index cc60c53..a3e7833 100644
--- a/ql/src/test/results/clientpositive/tez/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/tez/subquery_in.q.out
@@ -794,7 +794,7 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+                    predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out
index 0efee37..1395955 100644
--- a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out
@@ -247,7 +247,7 @@ STAGE PLANS:
                   alias: x
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (((t is not null and (date >= '2014-03-04')) and (date < '2014-09-03')) and (u <> 0)) (type: boolean)
+                    predicate: (t is not null and (date >= '2014-03-04') and (date < '2014-09-03') and (u <> 0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Reduce Output Operator
                       key expressions: t (type: string), st (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vector_date_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_date_1.q.out b/ql/src/test/results/clientpositive/tez/vector_date_1.q.out
index 057d974..a27edcb 100644
--- a/ql/src/test/results/clientpositive/tez/vector_date_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_date_1.q.out
@@ -534,7 +534,7 @@ STAGE PLANS:
                   alias: vector_date_1
                   Statistics: Num rows: 3 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((dt1 = dt1) and (dt1 <> dt2)) and (dt1 < dt2)) and (dt1 <= dt2)) and (dt2 > dt1)) and (dt2 >= dt1)) (type: boolean)
+                    predicate: ((dt1 = dt1) and (dt1 <> dt2) and (dt1 < dt2) and (dt1 <= dt2) and (dt2 > dt1) and (dt2 >= dt1)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: dt1 (type: date), dt2 (type: date)
@@ -649,7 +649,7 @@ STAGE PLANS:
                   alias: vector_date_1
                   Statistics: Num rows: 3 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((dt1 = 2001-01-01) and (2001-01-01 = dt1)) and (dt1 <> 1970-01-01)) and (1970-01-01 <> dt1)) and (dt1 > 1970-01-01)) and (dt1 >= 1970-01-01)) and (1970-01-01 < dt1)) and (1970-01-01 <= dt1)) (type: boolean)
+                    predicate: ((dt1 = 2001-01-01) and (2001-01-01 = dt1) and (dt1 <> 1970-01-01) and (1970-01-01 <> dt1) and (dt1 > 1970-01-01) and (dt1 >= 1970-01-01) and (1970-01-01 < dt1) and (1970-01-01 <= dt1)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 74 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: dt2 (type: date)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
index 35b7e87..16d9929 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out
@@ -13,7 +13,7 @@ STAGE PLANS:
         TableScan
           alias: alltypesorc
           Filter Operator
-            predicate: (((cdouble is not null and cint is not null) and cboolean1 is not null) and ctimestamp1 is not null) (type: boolean)
+            predicate: (cdouble is not null and cint is not null and cboolean1 is not null and ctimestamp1 is not null) (type: boolean)
             Select Operator
               expressions: cdouble (type: double), cint (type: int), cboolean1 (type: boolean), ctimestamp1 (type: timestamp), CAST( cdouble AS decimal(20,10)) (type: decimal(20,10)), CAST( cint AS decimal(23,14)) (type: decimal(23,14)), CAST( cboolean1 AS decimal(5,2)) (type: decimal(5,2)), CAST( ctimestamp1 AS decimal(15,0)) (type: decimal(15,0))
               outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
index 2976cb5..1b21c99 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
@@ -41,7 +41,7 @@ STAGE PLANS:
                   alias: decimal_test
                   Statistics: Num rows: 12288 Data size: 2128368 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((cdecimal1 > 0) and (cdecimal1 < 12345.5678)) and (cdecimal2 <> 0)) and (cdecimal2 > 1000)) and cdouble is not null) (type: boolean)
+                    predicate: ((cdecimal1 > 0) and (cdecimal1 < 12345.5678) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
                     Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (2 * cdecimal2)) (type: decimal(26,14)), ((UDFToDouble(cdecimal1) + 2.34) / UDFToDouble(cdecimal2)) (type: double), (UDFToDouble(cdecimal1) * (UDFToDouble(cdecimal2) / 3.4)) (type: double), (cdecimal1 % 10) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vector_interval_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_interval_2.q.out b/ql/src/test/results/clientpositive/tez/vector_interval_2.q.out
index 18a2527..b0cbe64 100644
--- a/ql/src/test/results/clientpositive/tez/vector_interval_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_interval_2.q.out
@@ -788,7 +788,7 @@ STAGE PLANS:
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((((((((((((CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH))) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH))) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH))) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH))) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH))) and (CAST( str1 AS INTERVAL YEAR TO MONTH) = 1-2)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-3)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-3)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < 1-3)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= 1-2)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > 1-2)) and (1-2 = CAST( str1 AS INTERVAL YEAR TO MONTH))) and (1-2 <> CAST( str2 AS INTERVAL YEAR TO MONTH))) and (1-2 <= CAST( str2 AS INTERVAL YEAR TO MONTH))) and (1-2 < CAST( str2 AS INTERVAL YEAR TO MONTH))) and (1-3 >= CAST( str1 AS INTERVAL YEAR TO MONTH))) and (1-3 > CAST( str1 AS INTERVAL YEAR TO MONTH))) (type: boolean)
+                    predicate: ((CAST( str1 AS INTERVAL YEAR TO MONTH) = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > CAST( str1 AS INTERVAL YEAR TO MONTH)) and (CAST( str1 AS INTERVAL YEAR TO MONTH) = 1-2) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <> 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) <= 1-3) and (CAST( str1 AS INTERVAL YEAR TO MONTH) < 1-3) and (CAST( str2 AS INTERVAL YEAR TO MONTH) >= 1-2) and (CAST( str2 AS INTERVAL YEAR TO MONTH) > 1-2) and (1-2 = CAST( str1 AS INTERVAL YEAR TO MONTH)) and (1-2 <> CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 <= CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-2 < CAST( str2 AS INTERVAL YEAR TO MONTH)) and (1-3 >= CAST( str1 AS INTERVAL YEAR TO MONTH)) and (1-3 > CAST( str1 AS INTERVAL YEAR TO MONTH))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -941,7 +941,7 @@ STAGE PLANS:
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((((((((((((CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND))) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND))) and (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND))) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND))) and (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND))) and (CAST( str3 AS INTERVAL DAY TO SECOND) = 1 02:03:04.000000000)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:05.000000000)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:05.000000000)) and (CAST( str3 AS INTERVAL DAY TO SECOND) < 1 02:03:05.000000000)) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000)) and (CAST( str4 AS INTERVAL DAY TO SECOND) > 1 02:03:04.000000000)) and (1 02:03:04.000000000 = CA
 ST( str3 AS INTERVAL DAY TO SECOND))) and (1 02:03:04.000000000 <> CAST( str4 AS INTERVAL DAY TO SECOND))) and (1 02:03:04.000000000 <= CAST( str4 AS INTERVAL DAY TO SECOND))) and (1 02:03:04.000000000 < CAST( str4 AS INTERVAL DAY TO SECOND))) and (1 02:03:05.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND))) and (1 02:03:05.000000000 > CAST( str3 AS INTERVAL DAY TO SECOND))) (type: boolean)
+                    predicate: ((CAST( str3 AS INTERVAL DAY TO SECOND) = CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) < CAST( str4 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str4 AS INTERVAL DAY TO SECOND) > CAST( str3 AS INTERVAL DAY TO SECOND)) and (CAST( str3 AS INTERVAL DAY TO SECOND) = 1 02:03:04.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) <> 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) <= 1 02:03:05.000000000) and (CAST( str3 AS INTERVAL DAY TO SECOND) < 1 02:03:05.000000000) and (CAST( str4 AS INTERVAL DAY TO SECOND) >= 1 02:03:04.000000000) and (CAST( str4 AS INTERVAL DAY TO SECOND) > 1 02:03:04.000000000) and (1 02:03:04.000000000 = CAST( str3 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 <> CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 <= CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:04.000000000 < CAST( str4 AS INTERVAL DAY TO SECOND)) and (1 02:03:05.000000000 >= CAST( str3 AS INTERVAL DAY TO SECOND)) and (1 02:03:05.000000000 > CAST( str3 AS INTERVAL DAY TO SECOND))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1084,7 +1084,7 @@ STAGE PLANS:
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((((((((2002-03-01 = (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 <= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)))) and (2002-03-01 >= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)))) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = 2002-03-01)) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) <= 2002-03-01)) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) >= 2002-03-01)) and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH)))) and (2002-03-01 = (dt + 1-2))) and (2002-03-01 <= (dt + 1-2))) and (2002-03-01 >= (dt + 1-2))) and ((dt + 1-2) = 2002-03-01)) and ((dt + 1-2) <= 2002-03-01)) and ((dt + 1-2) >= 2002-03-01)) and (dt <> (dt + 1-2))) (type: boolean)
+                    predicate: ((2002-03-01 = (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 <= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 >= (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) = 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) <= 2002-03-01) and ((dt + CAST( str1 AS INTERVAL YEAR TO MONTH)) >= 2002-03-01) and (dt <> (dt + CAST( str1 AS INTERVAL YEAR TO MONTH))) and (2002-03-01 = (dt + 1-2)) and (2002-03-01 <= (dt + 1-2)) and (2002-03-01 >= (dt + 1-2)) and ((dt + 1-2) = 2002-03-01) and ((dt + 1-2) <= 2002-03-01) and ((dt + 1-2) >= 2002-03-01) and (dt <> (dt + 1-2))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1227,7 +1227,7 @@ STAGE PLANS:
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((((((((((((2002-03-01 01:02:03.0 = (ts + 1-2)) and (2002-03-01 01:02:03.0 <= (ts + 1-2))) and (2002-03-01 01:02:03.0 >= (ts + 1-2))) and (2002-04-01 01:02:03.0 <> (ts + 1-2))) and (2002-02-01 01:02:03.0 < (ts + 1-2))) and (2002-04-01 01:02:03.0 > (ts + 1-2))) and ((ts + 1-2) = 2002-03-01 01:02:03.0)) and ((ts + 1-2) >= 2002-03-01 01:02:03.0)) and ((ts + 1-2) <= 2002-03-01 01:02:03.0)) and ((ts + 1-2) <> 2002-04-01 01:02:03.0)) and ((ts + 1-2) > 2002-02-01 01:02:03.0)) and ((ts + 1-2) < 2002-04-01 01:02:03.0)) and (ts = (ts + 0-0))) and (ts <> (ts + 1-0))) and (ts <= (ts + 1-0))) and (ts < (ts + 1-0))) and (ts >= (ts - 1-0))) and (ts > (ts - 1-0))) (type: boolean)
+                    predicate: ((2002-03-01 01:02:03.0 = (ts + 1-2)) and (2002-03-01 01:02:03.0 <= (ts + 1-2)) and (2002-03-01 01:02:03.0 >= (ts + 1-2)) and (2002-04-01 01:02:03.0 <> (ts + 1-2)) and (2002-02-01 01:02:03.0 < (ts + 1-2)) and (2002-04-01 01:02:03.0 > (ts + 1-2)) and ((ts + 1-2) = 2002-03-01 01:02:03.0) and ((ts + 1-2) >= 2002-03-01 01:02:03.0) and ((ts + 1-2) <= 2002-03-01 01:02:03.0) and ((ts + 1-2) <> 2002-04-01 01:02:03.0) and ((ts + 1-2) > 2002-02-01 01:02:03.0) and ((ts + 1-2) < 2002-04-01 01:02:03.0) and (ts = (ts + 0-0)) and (ts <> (ts + 1-0)) and (ts <= (ts + 1-0)) and (ts < (ts + 1-0)) and (ts >= (ts - 1-0)) and (ts > (ts - 1-0))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1382,7 +1382,7 @@ STAGE PLANS:
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((((((((((((2001-01-01 01:02:03.0 = (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 <> (dt + 0 01:02:04.000000000))) and (2001-01-01 01:02:03.0 <= (dt + 0 01:02:03.000000000))) and (2001-01-01 01:02:03.0 < (dt + 0 01:02:04.000000000))) and (2001-01-01 01:02:03.0 >= (dt - 0 01:02:03.000000000))) and (2001-01-01 01:02:03.0 > (dt - 0 01:02:04.000000000))) and ((dt + 0 01:02:03.000000000) = 2001-01-01 01:02:03.0)) and ((dt + 0 01:02:04.000000000) <> 2001-01-01 01:02:03.0)) and ((dt + 0 01:02:03.000000000) >= 2001-01-01 01:02:03.0)) and ((dt + 0 01:02:04.000000000) > 2001-01-01 01:02:03.0)) and ((dt - 0 01:02:03.000000000) <= 2001-01-01 01:02:03.0)) and ((dt - 0 01:02:04.000000000) < 2001-01-01 01:02:03.0)) and (ts = (dt + 0 01:02:03.000000000))) and (ts <> (dt + 0 01:02:04.000000000))) and (ts <= (dt + 0 01:02:03.000000000))) and (ts < (dt + 0 01:02:04.000000000))) and (ts >= (dt - 0 01:02:03.000000000))) and (ts > (dt - 0 01:02:04.000000000))) (type: boolean)
+                    predicate: ((2001-01-01 01:02:03.0 = (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 <> (dt + 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 <= (dt + 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 < (dt + 0 01:02:04.000000000)) and (2001-01-01 01:02:03.0 >= (dt - 0 01:02:03.000000000)) and (2001-01-01 01:02:03.0 > (dt - 0 01:02:04.000000000)) and ((dt + 0 01:02:03.000000000) = 2001-01-01 01:02:03.0) and ((dt + 0 01:02:04.000000000) <> 2001-01-01 01:02:03.0) and ((dt + 0 01:02:03.000000000) >= 2001-01-01 01:02:03.0) and ((dt + 0 01:02:04.000000000) > 2001-01-01 01:02:03.0) and ((dt - 0 01:02:03.000000000) <= 2001-01-01 01:02:03.0) and ((dt - 0 01:02:04.000000000) < 2001-01-01 01:02:03.0) and (ts = (dt + 0 01:02:03.000000000)) and (ts <> (dt + 0 01:02:04.000000000)) and (ts <= (dt + 0 01:02:03.000000000)) and (ts < (dt + 0 01:02:04.000000000)) and (ts >= (dt - 0 01:02:03.000000000)) and (ts > (dt - 0 01:02:04.000000000))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)
@@ -1535,7 +1535,7 @@ STAGE PLANS:
                   alias: vector_interval_2
                   Statistics: Num rows: 2 Data size: 788 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((((((((((((2001-01-01 01:02:03.0 = (ts + 0 00:00:00.000000000)) and (2001-01-01 01:02:03.0 <> (ts + 1 00:00:00.000000000))) and (2001-01-01 01:02:03.0 <= (ts + 1 00:00:00.000000000))) and (2001-01-01 01:02:03.0 < (ts + 1 00:00:00.000000000))) and (2001-01-01 01:02:03.0 >= (ts - 1 00:00:00.000000000))) and (2001-01-01 01:02:03.0 > (ts - 1 00:00:00.000000000))) and ((ts + 0 00:00:00.000000000) = 2001-01-01 01:02:03.0)) and ((ts + 1 00:00:00.000000000) <> 2001-01-01 01:02:03.0)) and ((ts + 1 00:00:00.000000000) >= 2001-01-01 01:02:03.0)) and ((ts + 1 00:00:00.000000000) > 2001-01-01 01:02:03.0)) and ((ts - 1 00:00:00.000000000) <= 2001-01-01 01:02:03.0)) and ((ts - 1 00:00:00.000000000) < 2001-01-01 01:02:03.0)) and (ts = (ts + 0 00:00:00.000000000))) and (ts <> (ts + 1 00:00:00.000000000))) and (ts <= (ts + 1 00:00:00.000000000))) and (ts < (ts + 1 00:00:00.000000000))) and (ts >= (ts - 1 00:00:00.000000000))) and (ts > (ts - 1 00:00:00.000000000))) (type: boolean)
+                    predicate: ((2001-01-01 01:02:03.0 = (ts + 0 00:00:00.000000000)) and (2001-01-01 01:02:03.0 <> (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 <= (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 < (ts + 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 >= (ts - 1 00:00:00.000000000)) and (2001-01-01 01:02:03.0 > (ts - 1 00:00:00.000000000)) and ((ts + 0 00:00:00.000000000) = 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) <> 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) >= 2001-01-01 01:02:03.0) and ((ts + 1 00:00:00.000000000) > 2001-01-01 01:02:03.0) and ((ts - 1 00:00:00.000000000) <= 2001-01-01 01:02:03.0) and ((ts - 1 00:00:00.000000000) < 2001-01-01 01:02:03.0) and (ts = (ts + 0 00:00:00.000000000)) and (ts <> (ts + 1 00:00:00.000000000)) and (ts <= (ts + 1 00:00:00.000000000)) and (ts < (ts + 1 00:00:00.000000000)) and (ts >= (ts - 1 00:00:00.000000000)) and (ts > (ts - 1 00:00:00.000000000))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 394 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ts (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
index d7bf9af..92ad7b9 100644
--- a/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
@@ -740,7 +740,7 @@ STAGE PLANS:
                   alias: t2
                   Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string)
@@ -2998,7 +2998,7 @@ STAGE PLANS:
                   alias: t2
                   Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string)
@@ -5279,7 +5279,7 @@ STAGE PLANS:
                   alias: t2
                   Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string)
@@ -7604,7 +7604,7 @@ STAGE PLANS:
                   alias: t2
                   Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string)
@@ -9934,7 +9934,7 @@ STAGE PLANS:
                   alias: t2
                   Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string)
@@ -12259,7 +12259,7 @@ STAGE PLANS:
                   alias: t2
                   Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
index 8f5090a..2864a48 100644
--- a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
@@ -41,7 +41,7 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+                    predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)
@@ -198,7 +198,7 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_linenumber = 1) and l_partkey is not null) and l_orderkey is not null) (type: boolean)
+                    predicate: ((l_linenumber = 1) and l_partkey is not null and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int), l_partkey (type: int), l_suppkey (type: int)
@@ -227,7 +227,7 @@ STAGE PLANS:
                   alias: lineitem
                   Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((l_shipmode = 'AIR') and (l_linenumber = 1)) and l_orderkey is not null) (type: boolean)
+                    predicate: ((l_shipmode = 'AIR') and (l_linenumber = 1) and l_orderkey is not null) (type: boolean)
                     Statistics: Num rows: 25 Data size: 2999 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: l_orderkey (type: int)


[34/58] [abbrv] hive git commit: HIVE-13499 : TestJdbcWithMiniHS2 is hanging - temp patch to disable the test class

Posted by jd...@apache.org.
HIVE-13499 : TestJdbcWithMiniHS2 is hanging - temp patch to disable the test class


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/98a7dd8c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/98a7dd8c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/98a7dd8c

Branch: refs/heads/llap
Commit: 98a7dd8c88969654baadcee65d34863e97835aa4
Parents: b5fe2d2
Author: Thejas Nair <th...@hortonworks.com>
Authored: Wed Apr 13 10:55:50 2016 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Wed Apr 13 10:55:50 2016 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java    | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/98a7dd8c/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 10c8ff2..857805a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -64,8 +64,10 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
+@Ignore("Disabling test until hanging issue is resolved.")
 public class TestJdbcWithMiniHS2 {
   private static MiniHS2 miniHS2 = null;
   private static String dataFileDir;
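
For reference, a class-level @Ignore in JUnit 4, as added above, skips every test method in the class and surfaces the reason string in the test report. A minimal sketch with a hypothetical class name (not part of the Hive code base):

    import org.junit.Ignore;
    import org.junit.Test;

    // Hypothetical example class; the class-level annotation disables all tests it contains.
    @Ignore("Disabling until the hang is diagnosed.")
    public class ExampleHangingTest {
      @Test
      public void neverRuns() {
        // JUnit 4 reports this test as ignored without executing the body.
      }
    }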


[10/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/677e5d20
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/677e5d20
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/677e5d20

Branch: refs/heads/llap
Commit: 677e5d20109e31203129ef5090c8989e9bb7c366
Parents: 9a00b2f
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Tue Mar 29 14:37:51 2016 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Apr 9 14:21:09 2016 -0700

----------------------------------------------------------------------
 .../test/results/positive/hbase_pushdown.q.out  |    2 +-
 .../test/results/positive/ppd_key_ranges.q.out  |    2 +-
 .../hive/ql/index/IndexPredicateAnalyzer.java   |   35 +-
 .../hadoop/hive/ql/plan/ExprNodeDescUtils.java  |   14 +-
 .../clientpositive/annotate_stats_filter.q.out  |    2 +-
 .../results/clientpositive/auto_join16.q.out    |    4 +-
 .../results/clientpositive/auto_join4.q.out     |    2 +-
 .../results/clientpositive/auto_join5.q.out     |    2 +-
 .../results/clientpositive/auto_join8.q.out     |    2 +-
 .../auto_join_reordering_values.q.out           |    2 +-
 .../test/results/clientpositive/cbo_const.q.out |    2 +-
 .../results/clientpositive/cbo_rp_join1.q.out   |    4 +-
 .../clientpositive/constprog_semijoin.q.out     |   24 +-
 .../clientpositive/correlationoptimizer13.q.out |    2 +-
 .../clientpositive/correlationoptimizer9.q.out  |    4 +-
 .../clientpositive/dynamic_rdd_cache.q.out      |    8 +-
 .../dynpart_sort_optimization.q.out             |    2 +-
 .../clientpositive/filter_cond_pushdown.q.out   |    2 +-
 .../clientpositive/groupby_position.q.out       |    4 +-
 .../identity_project_remove_skip.q.out          |    2 +-
 .../clientpositive/index_auto_mult_tables.q.out |   20 +-
 .../index_auto_mult_tables_compact.q.out        |   20 +-
 .../clientpositive/index_auto_self_join.q.out   |   20 +-
 .../results/clientpositive/index_bitmap3.q.out  |    4 +-
 .../clientpositive/index_bitmap_auto.q.out      |    4 +-
 .../index_bitmap_compression.q.out              |    4 +-
 .../clientpositive/infer_const_type.q.out       |    4 +-
 .../clientpositive/input_testxpath4.q.out       |    2 +-
 ql/src/test/results/clientpositive/join16.q.out |    4 +-
 ql/src/test/results/clientpositive/join19.q.out |    8 +-
 ql/src/test/results/clientpositive/join4.q.out  |    2 +-
 ql/src/test/results/clientpositive/join42.q.out |    2 +-
 ql/src/test/results/clientpositive/join5.q.out  |    2 +-
 ql/src/test/results/clientpositive/join8.q.out  |    2 +-
 .../clientpositive/join_grp_diff_keys.q.out     |    2 +-
 .../results/clientpositive/join_reorder2.q.out  |    2 +-
 .../results/clientpositive/join_reorder3.q.out  |    2 +-
 .../llap/dynamic_partition_pruning.q.out        |   20 +-
 .../llap/hybridgrace_hashjoin_2.q.out           |   12 +-
 .../llap/tez_union_group_by.q.out               |    2 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   20 +-
 .../clientpositive/louter_join_ppr.q.out        |    4 +-
 .../test/results/clientpositive/masking_1.q.out |   12 +-
 .../test/results/clientpositive/masking_2.q.out |   10 +-
 .../test/results/clientpositive/masking_3.q.out |    2 +-
 .../test/results/clientpositive/masking_4.q.out |    4 +-
 .../test/results/clientpositive/masking_5.q.out |    2 +-
 .../clientpositive/masking_disablecbo_1.q.out   |   14 +-
 .../clientpositive/masking_disablecbo_2.q.out   |   10 +-
 .../clientpositive/masking_disablecbo_3.q.out   |    2 +-
 .../clientpositive/masking_disablecbo_4.q.out   |    4 +-
 .../results/clientpositive/multiMapJoin1.q.out  |   16 +-
 .../clientpositive/orc_predicate_pushdown.q.out |   24 +-
 .../parquet_predicate_pushdown.q.out            |   28 +-
 .../results/clientpositive/perf/query13.q.out   |    6 +-
 .../results/clientpositive/perf/query15.q.out   |    2 +-
 .../results/clientpositive/perf/query17.q.out   |    6 +-
 .../results/clientpositive/perf/query18.q.out   |    6 +-
 .../results/clientpositive/perf/query19.q.out   |    4 +-
 .../results/clientpositive/perf/query21.q.out   |    2 +-
 .../results/clientpositive/perf/query22.q.out   |    2 +-
 .../results/clientpositive/perf/query25.q.out   |   12 +-
 .../results/clientpositive/perf/query26.q.out   |    4 +-
 .../results/clientpositive/perf/query27.q.out   |    4 +-
 .../results/clientpositive/perf/query29.q.out   |   10 +-
 .../results/clientpositive/perf/query31.q.out   |   12 +-
 .../results/clientpositive/perf/query34.q.out   |    6 +-
 .../results/clientpositive/perf/query39.q.out   |    8 +-
 .../results/clientpositive/perf/query40.q.out   |    2 +-
 .../results/clientpositive/perf/query42.q.out   |    2 +-
 .../results/clientpositive/perf/query45.q.out   |    4 +-
 .../results/clientpositive/perf/query46.q.out   |    4 +-
 .../results/clientpositive/perf/query48.q.out   |    6 +-
 .../results/clientpositive/perf/query50.q.out   |    6 +-
 .../results/clientpositive/perf/query52.q.out   |    2 +-
 .../results/clientpositive/perf/query54.q.out   |   10 +-
 .../results/clientpositive/perf/query55.q.out   |    2 +-
 .../results/clientpositive/perf/query64.q.out   |   16 +-
 .../results/clientpositive/perf/query65.q.out   |    2 +-
 .../results/clientpositive/perf/query66.q.out   |    4 +-
 .../results/clientpositive/perf/query67.q.out   |    2 +-
 .../results/clientpositive/perf/query68.q.out   |    4 +-
 .../results/clientpositive/perf/query7.q.out    |    4 +-
 .../results/clientpositive/perf/query71.q.out   |   12 +-
 .../results/clientpositive/perf/query72.q.out   |    6 +-
 .../results/clientpositive/perf/query73.q.out   |    6 +-
 .../results/clientpositive/perf/query75.q.out   |   12 +-
 .../results/clientpositive/perf/query76.q.out   |    6 +-
 .../results/clientpositive/perf/query79.q.out   |    4 +-
 .../results/clientpositive/perf/query80.q.out   |    6 +-
 .../results/clientpositive/perf/query82.q.out   |    4 +-
 .../results/clientpositive/perf/query84.q.out   |    4 +-
 .../results/clientpositive/perf/query85.q.out   |   10 +-
 .../results/clientpositive/perf/query88.q.out   |   32 +-
 .../results/clientpositive/perf/query89.q.out   |    4 +-
 .../results/clientpositive/perf/query90.q.out   |    4 +-
 .../results/clientpositive/perf/query91.q.out   |    8 +-
 .../results/clientpositive/perf/query92.q.out   |    4 +-
 .../results/clientpositive/perf/query93.q.out   |    2 +-
 .../results/clientpositive/perf/query94.q.out   |    2 +-
 .../results/clientpositive/perf/query95.q.out   |    2 +-
 .../results/clientpositive/perf/query96.q.out   |    4 +-
 .../results/clientpositive/ppd_gby_join.q.out   |    8 +-
 .../test/results/clientpositive/ppd_join.q.out  |    8 +-
 .../test/results/clientpositive/ppd_join2.q.out |   12 +-
 .../test/results/clientpositive/ppd_join3.q.out |   12 +-
 .../test/results/clientpositive/ppd_join4.q.out |    2 +-
 .../clientpositive/ppd_outer_join2.q.out        |    8 +-
 .../clientpositive/ppd_outer_join3.q.out        |    8 +-
 .../clientpositive/ppd_outer_join4.q.out        |   12 +-
 .../results/clientpositive/ppd_udf_case.q.out   |    4 +-
 .../test/results/clientpositive/ppd_union.q.out |    8 +-
 ql/src/test/results/clientpositive/ppd_vc.q.out |    2 +-
 .../clientpositive/rcfile_null_value.q.out      |    2 +-
 .../clientpositive/router_join_ppr.q.out        |    4 +-
 .../test/results/clientpositive/sample8.q.out   |    4 +-
 .../test/results/clientpositive/semijoin.q.out  |    2 +-
 .../test/results/clientpositive/semijoin2.q.out |    4 +-
 .../test/results/clientpositive/semijoin4.q.out |    4 +-
 .../clientpositive/skewjoin_mapjoin9.q.out      |    4 +-
 .../results/clientpositive/skewjoinopt12.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt14.q.out  |    4 +-
 .../results/clientpositive/skewjoinopt16.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt17.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt2.q.out   |   16 +-
 .../results/clientpositive/smb_mapjoin_10.q.out |    2 +-
 .../results/clientpositive/smb_mapjoin_14.q.out |    2 +-
 .../clientpositive/sort_merge_join_desc_2.q.out |    2 +-
 .../clientpositive/sort_merge_join_desc_3.q.out |    2 +-
 .../clientpositive/sort_merge_join_desc_4.q.out |    4 +-
 .../clientpositive/sort_merge_join_desc_8.q.out |    4 +-
 .../clientpositive/spark/auto_join16.q.out      |    4 +-
 .../clientpositive/spark/auto_join4.q.out       |    2 +-
 .../clientpositive/spark/auto_join5.q.out       |    2 +-
 .../clientpositive/spark/auto_join8.q.out       |    2 +-
 .../spark/auto_join_reordering_values.q.out     |    2 +-
 .../spark/constprog_semijoin.q.out              |   24 +-
 .../spark/dynamic_rdd_cache.q.out               |    8 +-
 .../clientpositive/spark/groupby_position.q.out |    4 +-
 .../spark/identity_project_remove_skip.q.out    |    2 +-
 .../spark/index_auto_self_join.q.out            |   12 +-
 .../clientpositive/spark/index_bitmap3.q.out    |   38 +-
 .../spark/index_bitmap_auto.q.out               |    4 +-
 .../results/clientpositive/spark/join16.q.out   |    4 +-
 .../results/clientpositive/spark/join19.q.out   |    8 +-
 .../results/clientpositive/spark/join4.q.out    |    2 +-
 .../results/clientpositive/spark/join5.q.out    |    2 +-
 .../results/clientpositive/spark/join8.q.out    |    2 +-
 .../clientpositive/spark/join_reorder2.q.out    |    2 +-
 .../clientpositive/spark/join_reorder3.q.out    |    2 +-
 .../clientpositive/spark/louter_join_ppr.q.out  |    4 +-
 .../clientpositive/spark/ppd_gby_join.q.out     |    8 +-
 .../results/clientpositive/spark/ppd_join.q.out |    8 +-
 .../clientpositive/spark/ppd_join2.q.out        |   12 +-
 .../clientpositive/spark/ppd_join3.q.out        |   12 +-
 .../clientpositive/spark/ppd_outer_join2.q.out  |    8 +-
 .../clientpositive/spark/ppd_outer_join3.q.out  |    8 +-
 .../clientpositive/spark/ppd_outer_join4.q.out  |   12 +-
 .../clientpositive/spark/router_join_ppr.q.out  |    4 +-
 .../results/clientpositive/spark/sample8.q.out  |    4 +-
 .../results/clientpositive/spark/semijoin.q.out |    2 +-
 .../clientpositive/spark/skewjoinopt12.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt14.q.out    |    4 +-
 .../clientpositive/spark/skewjoinopt16.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt17.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt2.q.out     |   16 +-
 .../clientpositive/spark/smb_mapjoin_10.q.out   |    2 +-
 .../clientpositive/spark/smb_mapjoin_14.q.out   |    2 +-
 .../spark/sort_merge_join_desc_2.q.out          |   22 +-
 .../spark/sort_merge_join_desc_3.q.out          |   22 +-
 .../spark/sort_merge_join_desc_4.q.out          |   26 +-
 .../spark/sort_merge_join_desc_8.q.out          |   42 +-
 .../spark/spark_dynamic_partition_pruning.q.out | 2313 +++++++++-------
 ...k_vectorized_dynamic_partition_pruning.q.out | 2515 +++++++++++-------
 .../clientpositive/spark/subquery_in.q.out      |    2 +-
 .../spark/vector_mapjoin_reduce.q.out           |    6 +-
 .../clientpositive/spark/vectorization_14.q.out |    2 +-
 .../clientpositive/spark/vectorization_17.q.out |    2 +-
 .../spark/vectorized_string_funcs.q.out         |    2 +-
 .../results/clientpositive/subquery_in.q.out    |    2 +-
 .../clientpositive/tez/bucketpruning1.q.out     |   32 +-
 .../clientpositive/tez/constprog_semijoin.q.out |   24 +-
 .../tez/dynamic_partition_pruning.q.out         |   20 +-
 .../tez/dynpart_sort_optimization.q.out         |    2 +-
 .../clientpositive/tez/explainuser_1.q.out      |   38 +-
 .../clientpositive/tez/explainuser_2.q.out      |   12 +-
 .../tez/hybridgrace_hashjoin_2.q.out            |   12 +-
 .../clientpositive/tez/subquery_in.q.out        |    2 +-
 .../clientpositive/tez/tez_union_group_by.q.out |    2 +-
 .../clientpositive/tez/vector_date_1.q.out      |    4 +-
 .../tez/vector_decimal_cast.q.out               |    2 +-
 .../tez/vector_decimal_expressions.q.out        |    2 +-
 .../clientpositive/tez/vector_interval_2.q.out  |   12 +-
 .../tez/vector_leftsemi_mapjoin.q.out           |   12 +-
 .../tez/vector_mapjoin_reduce.q.out             |    6 +-
 .../clientpositive/tez/vectorization_14.q.out   |    2 +-
 .../clientpositive/tez/vectorization_17.q.out   |    2 +-
 .../clientpositive/tez/vectorization_7.q.out    |    4 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   20 +-
 .../tez/vectorized_string_funcs.q.out           |    2 +-
 .../results/clientpositive/vector_date_1.q.out  |    4 +-
 .../clientpositive/vector_decimal_cast.q.out    |    2 +-
 .../vector_decimal_expressions.q.out            |    2 +-
 .../clientpositive/vector_interval_2.q.out      |   12 +-
 .../vector_leftsemi_mapjoin.q.out               |   12 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |    6 +-
 .../clientpositive/vectorization_14.q.out       |    2 +-
 .../clientpositive/vectorization_17.q.out       |    2 +-
 .../clientpositive/vectorization_7.q.out        |    4 +-
 .../vectorized_string_funcs.q.out               |    2 +-
 210 files changed, 3616 insertions(+), 2721 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/hbase-handler/src/test/results/positive/hbase_pushdown.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_pushdown.q.out b/hbase-handler/src/test/results/positive/hbase_pushdown.q.out
index d5661be..a42e36f 100644
--- a/hbase-handler/src/test/results/positive/hbase_pushdown.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_pushdown.q.out
@@ -233,7 +233,7 @@ STAGE PLANS:
             alias: hbase_pushdown
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((key = 80) and (key = 90)) and (value like '%90%')) (type: boolean)
+              predicate: ((key = 80) and (key = 90) and (value like '%90%')) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: 90 (type: int), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out b/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
index 812ce95..34c3b23 100644
--- a/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
+++ b/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
@@ -189,7 +189,7 @@ STAGE PLANS:
             alias: hbase_ppd_keyrange
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((key >= 9) and (key < 17)) and (key = 11)) (type: boolean)
+              predicate: ((key >= 9) and (key < 17) and (key = 11)) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: 11 (type: int), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
index 6702d43..2f0deae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/index/IndexPredicateAnalyzer.java
@@ -200,29 +200,30 @@ public class IndexPredicateAnalyzer {
     }
     return expr;
   }
-  
+
   private ExprNodeDesc analyzeExpr(
     ExprNodeGenericFuncDesc expr,
     List<IndexSearchCondition> searchConditions,
     Object... nodeOutputs) throws SemanticException {
 
     if (FunctionRegistry.isOpAnd(expr)) {
-      assert(nodeOutputs.length == 2);
-      ExprNodeDesc residual1 = (ExprNodeDesc) nodeOutputs[0];
-      ExprNodeDesc residual2 = (ExprNodeDesc) nodeOutputs[1];
-      if (residual1 == null) {
-        return residual2;
+      assert(nodeOutputs.length >= 2);
+      List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
+      for (Object residual : nodeOutputs) {
+        if (null != residual) {
+          residuals.add((ExprNodeDesc)residual);
+        }
       }
-      if (residual2 == null) {
-        return residual1;
+      if (residuals.size() == 0) {
+        return null;
+      } else if (residuals.size() == 1) {
+        return residuals.get(0);
+      } else if (residuals.size() > 1) {
+        return new ExprNodeGenericFuncDesc(
+            TypeInfoFactory.booleanTypeInfo,
+            FunctionRegistry.getGenericUDFForAnd(),
+            residuals);
       }
-      List<ExprNodeDesc> residuals = new ArrayList<ExprNodeDesc>();
-      residuals.add(residual1);
-      residuals.add(residual2);
-      return new ExprNodeGenericFuncDesc(
-        TypeInfoFactory.booleanTypeInfo,
-        FunctionRegistry.getGenericUDFForAnd(),
-        residuals);
     }
 
     GenericUDF genericUDF = expr.getGenericUDF();
@@ -236,12 +237,12 @@ public class IndexPredicateAnalyzer {
       expr1 = getColumnExpr(expr1);
       expr2 = getColumnExpr(expr2);
     }
-    
+
     ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(expr1, expr2);
     if (extracted == null || (extracted.length > 2 && !acceptsFields)) {
       return expr;
     }
-    
+
     ExprNodeColumnDesc columnDesc;
     ExprNodeConstantDesc constantDesc;
     if (extracted[0] instanceof ExprNodeConstantDesc) {
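
The analyzeExpr change above is the consumer side of the flatter predicate shape: an AND node is no longer assumed to be binary, so the residuals (conjuncts that could not be converted into index search conditions) are collected from however many children exist and recombined. A minimal, self-contained sketch of that pattern, using plain strings in place of Hive's ExprNodeDesc (the class and method names below are illustrative, not Hive API):

    import java.util.ArrayList;
    import java.util.List;

    public class ResidualAndDemo {
      // Recombine the non-null residuals of an n-ary AND; null means "fully pushed down".
      static String combineResiduals(List<String> childResiduals) {
        List<String> residuals = new ArrayList<>();
        for (String residual : childResiduals) {
          if (residual != null) {
            residuals.add(residual);            // keep only children that still need filtering
          }
        }
        if (residuals.isEmpty()) {
          return null;                          // every conjunct became a search condition
        }
        if (residuals.size() == 1) {
          return residuals.get(0);              // a single leftover needs no wrapping AND
        }
        return "(" + String.join(" and ", residuals) + ")";  // rebuild one flat AND
      }

      public static void main(String[] args) {
        // e.g. two of four conjuncts were pushed into the storage handler
        List<String> children = new ArrayList<>();
        children.add(null);
        children.add("(value like '%90%')");
        children.add(null);
        children.add("(key = 90)");
        System.out.println(combineResiduals(children));  // ((value like '%90%') and (key = 90))
      }
    }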

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
index 223718e..c6f8907 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java
@@ -108,9 +108,17 @@ public class ExprNodeDescUtils {
    * bind two predicates by AND op
    */
   public static ExprNodeGenericFuncDesc mergePredicates(ExprNodeDesc prev, ExprNodeDesc next) {
-    List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>(2);
-    children.add(prev);
-    children.add(next);
+    final List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>(2);
+    if (FunctionRegistry.isOpAnd(prev)) {
+      children.addAll(prev.getChildren());
+    } else {
+      children.add(prev);
+    }
+    if (FunctionRegistry.isOpAnd(next)) {
+      children.addAll(next.getChildren());
+    } else {
+      children.add(next);
+    }
     return new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
         FunctionRegistry.getGenericUDFForAnd(), children);
   }
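
This mergePredicates change is what produces the bulk of the .q.out churn in this commit: repeated merging used to nest each new conjunct under a fresh binary AND, giving the left-deep (((a and b) and c) and d) shape on the '-' lines, whereas splicing in the children of an operand that is already an AND keeps a single flat conjunct list, giving the (a and b and c and d) shape on the '+' lines. A toy model of the new behaviour, with a tiny expression class standing in for ExprNodeDesc (names here are illustrative, not Hive API):

    import java.util.ArrayList;
    import java.util.List;

    public class MergePredicatesDemo {
      // Toy expression node: either a leaf predicate or an AND over child expressions.
      static class Expr {
        final String leaf;            // set for leaves
        final List<Expr> andChildren; // set for AND nodes
        Expr(String leaf) { this.leaf = leaf; this.andChildren = null; }
        Expr(List<Expr> children) { this.leaf = null; this.andChildren = children; }
        boolean isAnd() { return andChildren != null; }
        @Override public String toString() {
          if (!isAnd()) { return leaf; }
          StringBuilder sb = new StringBuilder("(");
          for (int i = 0; i < andChildren.size(); i++) {
            if (i > 0) { sb.append(" and "); }
            sb.append(andChildren.get(i));
          }
          return sb.append(")").toString();
        }
      }

      // Flattening merge: children of an operand that is already an AND are spliced in
      // instead of nesting the whole operand under a new binary AND.
      static Expr merge(Expr prev, Expr next) {
        List<Expr> children = new ArrayList<>();
        if (prev.isAnd()) { children.addAll(prev.andChildren); } else { children.add(prev); }
        if (next.isAnd()) { children.addAll(next.andChildren); } else { children.add(next); }
        return new Expr(children);
      }

      public static void main(String[] args) {
        Expr merged = merge(merge(new Expr("(c2 > 100)"), new Expr("(c1 < 120)")),
                            new Expr("c3 is not null"));
        // Prints one flat conjunction: ((c2 > 100) and (c1 < 120) and c3 is not null)
        System.out.println(merged);
      }
    }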

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
index 7e697f1..ba0419e 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
@@ -756,7 +756,7 @@ STAGE PLANS:
             alias: loc_orc
             Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
-              predicate: (((year = 2001) and (state = 'OH')) and (state = 'FL')) (type: boolean)
+              predicate: ((year = 2001) and (state = 'OH') and (state = 'FL')) (type: boolean)
               Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: 'FL' (type: string), locid (type: int), zip (type: bigint), 2001 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/auto_join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join16.q.out b/ql/src/test/results/clientpositive/auto_join16.q.out
index fc8712d..51573f1 100644
--- a/ql/src/test/results/clientpositive/auto_join16.q.out
+++ b/ql/src/test/results/clientpositive/auto_join16.q.out
@@ -32,7 +32,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -50,7 +50,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/auto_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join4.q.out b/ql/src/test/results/clientpositive/auto_join4.q.out
index 5ee76e4..a4afc7b 100644
--- a/ql/src/test/results/clientpositive/auto_join4.q.out
+++ b/ql/src/test/results/clientpositive/auto_join4.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/auto_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join5.q.out b/ql/src/test/results/clientpositive/auto_join5.q.out
index 71da744..bbc23dc 100644
--- a/ql/src/test/results/clientpositive/auto_join5.q.out
+++ b/ql/src/test/results/clientpositive/auto_join5.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join8.q.out b/ql/src/test/results/clientpositive/auto_join8.q.out
index 80dd575..324f95d 100644
--- a/ql/src/test/results/clientpositive/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/auto_join8.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out b/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
index ac349a4..db79fa5 100644
--- a/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
+++ b/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out
@@ -184,7 +184,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (((date is not null and dealid is not null) and cityid is not null) and userid is not null) (type: boolean)
+              predicate: (date is not null and dealid is not null and cityid is not null and userid is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: dealid (type: int), date (type: string), cityid (type: int), userid (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/cbo_const.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_const.q.out b/ql/src/test/results/clientpositive/cbo_const.q.out
index 770a6aa..c2a5194 100644
--- a/ql/src/test/results/clientpositive/cbo_const.q.out
+++ b/ql/src/test/results/clientpositive/cbo_const.q.out
@@ -294,7 +294,7 @@ STAGE PLANS:
             alias: z
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((ds = '2008-04-08') and (UDFToDouble(hr) = 14.0)) and value is not null) (type: boolean)
+              predicate: ((ds = '2008-04-08') and (UDFToDouble(hr) = 14.0) and value is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/cbo_rp_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_join1.q.out
index 97ec21a..f3982b8 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_join1.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_join1.q.out
@@ -353,7 +353,7 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
-                predicate: (((_col0 = _col1) and (_col1 > 50)) and (_col0 > 40)) (type: boolean)
+                predicate: ((_col0 = _col1) and (_col1 > 50) and (_col0 > 40)) (type: boolean)
                 Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
@@ -367,7 +367,7 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
-                predicate: (((_col0 = _col1) and (_col1 > 50)) and (_col0 > 40)) (type: boolean)
+                predicate: ((_col0 = _col1) and (_col1 > 50) and (_col0 > 40)) (type: boolean)
                 Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/constprog_semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/constprog_semijoin.q.out b/ql/src/test/results/clientpositive/constprog_semijoin.q.out
index 940a148..35d062d 100644
--- a/ql/src/test/results/clientpositive/constprog_semijoin.q.out
+++ b/ql/src/test/results/clientpositive/constprog_semijoin.q.out
@@ -158,7 +158,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((val = 't1val01') and id is not null) and dimid is not null) (type: boolean)
+              predicate: ((val = 't1val01') and id is not null and dimid is not null) (type: boolean)
               Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), dimid (type: int)
@@ -290,7 +290,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((val = 't1val01') and dimid is not null) and id is not null) (type: boolean)
+              predicate: ((val = 't1val01') and dimid is not null and id is not null) (type: boolean)
               Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), dimid (type: int)
@@ -421,7 +421,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((dimid = 100) = true) and (dimid <> 100)) and (dimid = 100) is not null) (type: boolean)
+              predicate: (((dimid = 100) = true) and (dimid <> 100) and (dimid = 100) is not null) (type: boolean)
               Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -437,7 +437,7 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((id = 100) = true) and (id <> 100)) and (id = 100) is not null) (type: boolean)
+              predicate: (((id = 100) = true) and (id <> 100) and (id = 100) is not null) (type: boolean)
               Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), (id = 100) (type: boolean)
@@ -502,7 +502,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((dimid) IN (100, 200) and ((dimid = 100) = true)) and (dimid = 100) is not null) (type: boolean)
+              predicate: ((dimid) IN (100, 200) and ((dimid = 100) = true) and (dimid = 100) is not null) (type: boolean)
               Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -518,7 +518,7 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((id) IN (100, 200) and ((id = 100) = true)) and (id = 100) is not null) (type: boolean)
+              predicate: ((id) IN (100, 200) and ((id = 100) = true) and (id = 100) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), (id = 100) (type: boolean)
@@ -585,7 +585,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((dimid = 100) = true) and (dimid = 200)) and (dimid = 100) is not null) (type: boolean)
+              predicate: (((dimid = 100) = true) and (dimid = 200) and (dimid = 100) is not null) (type: boolean)
               Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string)
@@ -601,7 +601,7 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((id = 100) = true) and (id = 200)) and (id = 100) is not null) (type: boolean)
+              predicate: (((id = 100) = true) and (id = 200) and (id = 100) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
@@ -664,7 +664,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((dimid = 100) = true) and (dimid = 100)) and (dimid = 100) is not null) (type: boolean)
+              predicate: (((dimid = 100) = true) and (dimid = 100) and (dimid = 100) is not null) (type: boolean)
               Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string)
@@ -680,7 +680,7 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((id = 100) = true) and (id = 100)) and (id = 100) is not null) (type: boolean)
+              predicate: (((id = 100) = true) and (id = 100) and (id = 100) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
@@ -745,7 +745,7 @@ STAGE PLANS:
             alias: table1
             Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((dimid = 100) = true) and dimid is not null) and (dimid = 100) is not null) (type: boolean)
+              predicate: (((dimid = 100) = true) and dimid is not null and (dimid = 100) is not null) (type: boolean)
               Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -761,7 +761,7 @@ STAGE PLANS:
             alias: table3
             Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((id = 100) = true) and id is not null) and (id = 100) is not null) (type: boolean)
+              predicate: (((id = 100) = true) and id is not null and (id = 100) is not null) (type: boolean)
               Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: id (type: int), (id = 100) (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/correlationoptimizer13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer13.q.out b/ql/src/test/results/clientpositive/correlationoptimizer13.q.out
index 8aeec44..ac5bdc6 100644
--- a/ql/src/test/results/clientpositive/correlationoptimizer13.q.out
+++ b/ql/src/test/results/clientpositive/correlationoptimizer13.q.out
@@ -162,7 +162,7 @@ STAGE PLANS:
             alias: x
             Statistics: Num rows: 1028 Data size: 22964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((c2 > 100) and (c1 < 120)) and c3 is not null) (type: boolean)
+              predicate: ((c2 > 100) and (c1 < 120) and c3 is not null) (type: boolean)
               Statistics: Num rows: 114 Data size: 2546 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c3 (type: string), c1 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/correlationoptimizer9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer9.q.out b/ql/src/test/results/clientpositive/correlationoptimizer9.q.out
index 59f6abd..97988b9 100644
--- a/ql/src/test/results/clientpositive/correlationoptimizer9.q.out
+++ b/ql/src/test/results/clientpositive/correlationoptimizer9.q.out
@@ -464,7 +464,7 @@ STAGE PLANS:
             alias: x
             Statistics: Num rows: 1028 Data size: 22964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((c2 > 100) and (c1 < 120)) and c3 is not null) (type: boolean)
+              predicate: ((c2 > 100) and (c1 < 120) and c3 is not null) (type: boolean)
               Statistics: Num rows: 114 Data size: 2546 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: int), c3 (type: string)
@@ -579,7 +579,7 @@ STAGE PLANS:
             alias: x
             Statistics: Num rows: 1028 Data size: 22964 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((c2 > 100) and (c1 < 120)) and c3 is not null) (type: boolean)
+              predicate: ((c2 > 100) and (c1 < 120) and c3 is not null) (type: boolean)
               Statistics: Num rows: 114 Data size: 2546 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: c1 (type: int), c3 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
index ea9529d..9a09c4c 100644
--- a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out
@@ -926,7 +926,7 @@ STAGE PLANS:
             alias: inventory
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((inv_item_sk is not null and inv_warehouse_sk is not null) and inv_date_sk is not null) (type: boolean)
+              predicate: (inv_item_sk is not null and inv_warehouse_sk is not null and inv_date_sk is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: inv_date_sk (type: int), inv_item_sk (type: int), inv_quantity_on_hand (type: int), inv_warehouse_sk (type: int)
@@ -1025,7 +1025,7 @@ STAGE PLANS:
             alias: date_dim
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((d_year = 1999) and (d_moy = 3)) and d_date_sk is not null) (type: boolean)
+              predicate: ((d_year = 1999) and (d_moy = 3) and d_date_sk is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: d_date_sk (type: int)
@@ -1163,7 +1163,7 @@ STAGE PLANS:
             alias: inventory
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((inv_item_sk is not null and inv_warehouse_sk is not null) and inv_date_sk is not null) (type: boolean)
+              predicate: (inv_item_sk is not null and inv_warehouse_sk is not null and inv_date_sk is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: inv_date_sk (type: int), inv_item_sk (type: int), inv_quantity_on_hand (type: int), inv_warehouse_sk (type: int)
@@ -1262,7 +1262,7 @@ STAGE PLANS:
             alias: date_dim
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((d_year = 1999) and (d_moy = 4)) and d_date_sk is not null) (type: boolean)
+              predicate: ((d_year = 1999) and (d_moy = 4) and d_date_sk is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: d_date_sk (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
index 857d609..13383fb 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
@@ -2656,7 +2656,7 @@ STAGE PLANS:
             alias: over1k
             Statistics: Num rows: 3949 Data size: 106636 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((i = 100) and (t = 27)) and (s = 'foo')) (type: boolean)
+              predicate: ((i = 100) and (t = 27) and (s = 'foo')) (type: boolean)
               Statistics: Num rows: 493 Data size: 13312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: si (type: smallint), b (type: bigint), f (type: float), 'foo' (type: string), 27 (type: tinyint), 100 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
index 738286e..f48a5a4 100644
--- a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
@@ -419,7 +419,7 @@ STAGE PLANS:
             alias: f
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((value = '2008-04-10') or (value = '2008-04-08')) and (value <> '')) and key is not null) (type: boolean)
+              predicate: (((value = '2008-04-10') or (value = '2008-04-08')) and (value <> '') and key is not null) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/groupby_position.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_position.q.out b/ql/src/test/results/clientpositive/groupby_position.q.out
index 86900fc..53f4a3e 100644
--- a/ql/src/test/results/clientpositive/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/groupby_position.q.out
@@ -561,7 +561,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: key (type: string), value (type: string)
@@ -647,7 +647,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 keys: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/identity_project_remove_skip.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/identity_project_remove_skip.q.out b/ql/src/test/results/clientpositive/identity_project_remove_skip.q.out
index 8742155..7ec14e8 100644
--- a/ql/src/test/results/clientpositive/identity_project_remove_skip.q.out
+++ b/ql/src/test/results/clientpositive/identity_project_remove_skip.q.out
@@ -35,7 +35,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and (value = 'val_105')) and (key = '105')) (type: boolean)
+              predicate: ((value = 'val_105') and (key = '105')) (type: boolean)
               Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
index e1ef94b..a34654c 100644
--- a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
@@ -22,7 +22,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -38,7 +38,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -220,9 +220,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__src_src_index_bitmap__
-            filterExpr: (((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
             Filter Operator
-              predicate: (((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
                 outputColumnNames: _bucketname, _offset
@@ -260,10 +260,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a
-            filterExpr: ((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -277,10 +277,10 @@ STAGE PLANS:
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
-            filterExpr: ((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -313,9 +313,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__srcpart_srcpart_index_bitmap__
-            filterExpr: (((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
             Filter Operator
-              predicate: (((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
                 outputColumnNames: _bucketname, _offset

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
index 1e5899b..33a52ff 100644
--- a/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out
@@ -22,7 +22,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -38,7 +38,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -227,9 +227,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__src_src_index_compact__
-            filterExpr: ((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offsets (type: array<bigint>)
                 outputColumnNames: _col0, _col1
@@ -260,10 +260,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a
-            filterExpr: ((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -277,10 +277,10 @@ STAGE PLANS:
                   value expressions: _col1 (type: string)
           TableScan
             alias: b
-            filterExpr: ((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -341,9 +341,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__srcpart_srcpart_index_compact__
-            filterExpr: ((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offsets (type: array<bigint>)
                 outputColumnNames: _col0, _col1

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/index_auto_self_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_self_join.q.out b/ql/src/test/results/clientpositive/index_auto_self_join.q.out
index 189a35a..6bb2e94 100644
--- a/ql/src/test/results/clientpositive/index_auto_self_join.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_self_join.q.out
@@ -20,7 +20,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and value is not null) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -36,7 +36,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and value is not null) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -132,9 +132,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__src_src_index__
-            filterExpr: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
             Filter Operator
-              predicate: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
                 outputColumnNames: _bucketname, _offset
@@ -172,10 +172,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: a
-            filterExpr: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and value is not null) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and value is not null) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -189,10 +189,10 @@ STAGE PLANS:
                   value expressions: _col0 (type: string)
           TableScan
             alias: a
-            filterExpr: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and value is not null) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and value is not null) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -230,9 +230,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__src_src_index__
-            filterExpr: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
             Filter Operator
-              predicate: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
                 outputColumnNames: _bucketname, _offset

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/index_bitmap3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap3.q.out b/ql/src/test/results/clientpositive/index_bitmap3.q.out
index 5269b9c..dc51c77 100644
--- a/ql/src/test/results/clientpositive/index_bitmap3.q.out
+++ b/ql/src/test/results/clientpositive/index_bitmap3.q.out
@@ -115,7 +115,7 @@ STAGE PLANS:
             alias: default__src_src1_index__
             Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) = 0.0) and _bucketname is not null) and _offset is not null) (type: boolean)
+              predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
@@ -131,7 +131,7 @@ STAGE PLANS:
             alias: default__src_src2_index__
             Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((value = 'val_0') and _bucketname is not null) and _offset is not null) (type: boolean)
+              predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/index_bitmap_auto.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap_auto.q.out b/ql/src/test/results/clientpositive/index_bitmap_auto.q.out
index 8c34084..bfab87f 100644
--- a/ql/src/test/results/clientpositive/index_bitmap_auto.q.out
+++ b/ql/src/test/results/clientpositive/index_bitmap_auto.q.out
@@ -134,7 +134,7 @@ STAGE PLANS:
             alias: default__src_src1_index__
             Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) = 0.0) and _bucketname is not null) and _offset is not null) (type: boolean)
+              predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
@@ -150,7 +150,7 @@ STAGE PLANS:
             alias: default__src_src2_index__
             Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((value = 'val_0') and _bucketname is not null) and _offset is not null) (type: boolean)
+              predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
               Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap_compression.q.out b/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
index d8fba35..662cbcf 100644
--- a/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
+++ b/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
@@ -39,9 +39,9 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: default__src_src_index__
-            filterExpr: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+            filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
             Filter Operator
-              predicate: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
+              predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
                 outputColumnNames: _bucketname, _offset

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/infer_const_type.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_const_type.q.out b/ql/src/test/results/clientpositive/infer_const_type.q.out
index bd0fb9a..4ff8c87 100644
--- a/ql/src/test/results/clientpositive/infer_const_type.q.out
+++ b/ql/src/test/results/clientpositive/infer_const_type.q.out
@@ -59,7 +59,7 @@ STAGE PLANS:
             alias: infertypes
             Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((ti = 127) and (si = 32767)) and (i = 12345)) and (bi = -12345)) and (fl = 906.0)) and (db = -307.0)) and (UDFToDouble(str) = 1234.0)) (type: boolean)
+              predicate: ((ti = 127) and (si = 32767) and (i = 12345) and (bi = -12345) and (fl = 906.0) and (db = -307.0) and (UDFToDouble(str) = 1234.0)) (type: boolean)
               Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 127 (type: tinyint), 32767 (type: smallint), 12345 (type: int), -12345 (type: bigint), 906.0 (type: float), -307.0 (type: double), str (type: string)
@@ -259,7 +259,7 @@ STAGE PLANS:
             alias: infertypes
             Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(ti) < 127.0) and (UDFToDouble(i) > 100.0)) and (UDFToDouble(str) = 1.57)) (type: boolean)
+              predicate: ((UDFToDouble(ti) < 127.0) and (UDFToDouble(i) > 100.0) and (UDFToDouble(str) = 1.57)) (type: boolean)
               Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint), fl (type: float), db (type: double), str (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/input_testxpath4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input_testxpath4.q.out b/ql/src/test/results/clientpositive/input_testxpath4.q.out
index cdd8273..44c0f4f 100644
--- a/ql/src/test/results/clientpositive/input_testxpath4.q.out
+++ b/ql/src/test/results/clientpositive/input_testxpath4.q.out
@@ -96,7 +96,7 @@ STAGE PLANS:
             alias: src_thrift
             Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((mstringstring['key_9'] is not null and lintstring.myint is not null) and lintstring is not null) (type: boolean)
+              predicate: (mstringstring['key_9'] is not null and lintstring.myint is not null and lintstring is not null) (type: boolean)
               Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: mstringstring['key_9'] (type: string), lintstring.myint (type: array<int>)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join16.q.out b/ql/src/test/results/clientpositive/join16.q.out
index d0b5e19..7434819 100644
--- a/ql/src/test/results/clientpositive/join16.q.out
+++ b/ql/src/test/results/clientpositive/join16.q.out
@@ -14,7 +14,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -29,7 +29,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join19.q.out b/ql/src/test/results/clientpositive/join19.q.out
index 91ddc75..67a796b 100644
--- a/ql/src/test/results/clientpositive/join19.q.out
+++ b/ql/src/test/results/clientpositive/join19.q.out
@@ -136,7 +136,7 @@ STAGE PLANS:
             alias: t1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Citation')) and subject is not null) (type: boolean)
+              predicate: ((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Citation') and subject is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: subject (type: string)
@@ -167,7 +167,7 @@ STAGE PLANS:
             alias: t1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_from') and object is not null) and subject is not null) (type: boolean)
+              predicate: ((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_from') and object is not null and subject is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: subject (type: string), object (type: string)
@@ -211,7 +211,7 @@ STAGE PLANS:
             alias: t1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Author')) and subject is not null) (type: boolean)
+              predicate: ((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Author') and subject is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: subject (type: string)
@@ -226,7 +226,7 @@ STAGE PLANS:
             alias: t1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_to') and subject is not null) and object is not null) (type: boolean)
+              predicate: ((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_to') and subject is not null and object is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: subject (type: string), object (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join4.q.out b/ql/src/test/results/clientpositive/join4.q.out
index 9c3babe..82b8568 100644
--- a/ql/src/test/results/clientpositive/join4.q.out
+++ b/ql/src/test/results/clientpositive/join4.q.out
@@ -69,7 +69,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join42.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join42.q.out b/ql/src/test/results/clientpositive/join42.q.out
index 542408f..462e49e 100644
--- a/ql/src/test/results/clientpositive/join42.q.out
+++ b/ql/src/test/results/clientpositive/join42.q.out
@@ -144,7 +144,7 @@ STAGE PLANS:
             alias: la
             Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((loan_id = 4436) and aid is not null) and pi_id is not null) (type: boolean)
+              predicate: ((loan_id = 4436) and aid is not null and pi_id is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 14 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: aid (type: int), pi_id (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join5.q.out b/ql/src/test/results/clientpositive/join5.q.out
index 0398655..fa9c756 100644
--- a/ql/src/test/results/clientpositive/join5.q.out
+++ b/ql/src/test/results/clientpositive/join5.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join8.q.out b/ql/src/test/results/clientpositive/join8.q.out
index 493341d..d7e7cb1 100644
--- a/ql/src/test/results/clientpositive/join8.q.out
+++ b/ql/src/test/results/clientpositive/join8.q.out
@@ -69,7 +69,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join_grp_diff_keys.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_grp_diff_keys.q.out b/ql/src/test/results/clientpositive/join_grp_diff_keys.q.out
index 53e33d1..17688a9 100644
--- a/ql/src/test/results/clientpositive/join_grp_diff_keys.q.out
+++ b/ql/src/test/results/clientpositive/join_grp_diff_keys.q.out
@@ -59,7 +59,7 @@ STAGE PLANS:
             alias: foo
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((orders <> 'blah') and id is not null) and line_id is not null) (type: boolean)
+              predicate: ((orders <> 'blah') and id is not null and line_id is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: id (type: int), line_id (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join_reorder2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_reorder2.q.out b/ql/src/test/results/clientpositive/join_reorder2.q.out
index 092c5bc..b713708 100644
--- a/ql/src/test/results/clientpositive/join_reorder2.q.out
+++ b/ql/src/test/results/clientpositive/join_reorder2.q.out
@@ -208,7 +208,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (key + 1) is not null) (type: boolean)
+              predicate: (key is not null and val is not null and (key + 1) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/join_reorder3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_reorder3.q.out b/ql/src/test/results/clientpositive/join_reorder3.q.out
index 2aa501e..8622263 100644
--- a/ql/src/test/results/clientpositive/join_reorder3.q.out
+++ b/ql/src/test/results/clientpositive/join_reorder3.q.out
@@ -208,7 +208,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (key + 1) is not null) (type: boolean)
+              predicate: (key is not null and val is not null and (key + 1) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string)

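The plan diffs above all show the same rewrite: a chain of nested, binary ANDs such as (((a and b) and c) and d) collapses into one flat conjunction (a and b and c and d). Below is a minimal, self-contained sketch of that flattening step; the Expr, Leaf, and And classes and the FlattenAnd driver are illustrative stand-ins invented for this example, not Hive's actual expression-node classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative stand-ins for plan expression nodes (not Hive's real classes).
abstract class Expr { }

class Leaf extends Expr {
    final String text;
    Leaf(String text) { this.text = text; }
    @Override public String toString() { return text; }
}

class And extends Expr {
    final List<Expr> children;
    And(List<Expr> children) { this.children = children; }
    @Override public String toString() {
        StringBuilder sb = new StringBuilder("(");
        for (int i = 0; i < children.size(); i++) {
            if (i > 0) sb.append(" and ");
            sb.append(children.get(i));
        }
        return sb.append(")").toString();
    }
}

public class FlattenAnd {
    static Expr leaf(String s) { return new Leaf(s); }
    static Expr and(Expr... xs) { return new And(Arrays.asList(xs)); }

    // Collapse nested ANDs into a single multi-child AND, preserving child order.
    static Expr flatten(Expr e) {
        if (!(e instanceof And)) {
            return e;
        }
        List<Expr> merged = new ArrayList<>();
        for (Expr child : ((And) e).children) {
            Expr f = flatten(child);
            if (f instanceof And) {
                merged.addAll(((And) f).children); // splice nested conjuncts in place
            } else {
                merged.add(f);
            }
        }
        return new And(merged);
    }

    public static void main(String[] args) {
        // Same shape as the plans above: (((c2 > 100) and (c1 < 120)) and c3 is not null)
        Expr nested = and(and(leaf("(c2 > 100)"), leaf("(c1 < 120)")), leaf("c3 is not null"));
        System.out.println(nested);          // (((c2 > 100) and (c1 < 120)) and c3 is not null)
        System.out.println(flatten(nested)); // ((c2 > 100) and (c1 < 120) and c3 is not null)
    }
}

Running main prints the nested and flattened forms, matching the before/after predicate strings in the diffs above.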

[48/58] [abbrv] hive git commit: Revert "HIVE-13505. Skip running TestDummy where possible during precommit builds. (Siddharth Seth, reviewed by Ashutosh Chauhan)"

Posted by jd...@apache.org.
Revert "HIVE-13505. Skip running TestDummy where possibe during precommit builds. (Siddharth Seth, reviewed by Ashutosh Chauhan)"

This reverts commit a207923ff847b62209f97682c0e3e7a649ae131d.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cedb6de1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cedb6de1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cedb6de1

Branch: refs/heads/llap
Commit: cedb6de141e70244b0f9f2cc915dd338a31d070e
Parents: 418f936
Author: Szehon Ho <sz...@cloudera.com>
Authored: Thu Apr 14 14:31:51 2016 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Thu Apr 14 14:31:51 2016 -0700

----------------------------------------------------------------------
 testutils/ptest2/src/main/resources/source-prep.vm | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cedb6de1/testutils/ptest2/src/main/resources/source-prep.vm
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/source-prep.vm b/testutils/ptest2/src/main/resources/source-prep.vm
index 9c83a14..97fb69c 100644
--- a/testutils/ptest2/src/main/resources/source-prep.vm
+++ b/testutils/ptest2/src/main/resources/source-prep.vm
@@ -97,8 +97,10 @@ cd $workingDir/
       done
     #end
     mvn -B clean install -DskipTests -Dmaven.repo.local=$workingDir/maven $mavenArgs $mavenBuildArgs
+    mvn -B test -Dmaven.repo.local=$workingDir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
     cd itests
     mvn -B clean install -DskipTests -Dmaven.repo.local=$workingDir/maven $mavenArgs $mavenBuildArgs
+    mvn -B test -Dmaven.repo.local=$workingDir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
   elif [[ "${buildTool}" == "ant" ]]
   then
     ant $antArgs -Divy.default.ivy.user.dir=$workingDir/ivy \


[57/58] [abbrv] hive git commit: Revert "HIVE-13499 : TestJdbcWithMiniHS2 is hanging - temp patch to disable the test class"

Posted by jd...@apache.org.
Revert "HIVE-13499 : TestJdbcWithMiniHS2 is hanging - temp patch to disable the test class"

This reverts commit 98a7dd8c88969654baadcee65d34863e97835aa4.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2d282919
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2d282919
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2d282919

Branch: refs/heads/llap
Commit: 2d282919c9453846e6f3e29e0d43080e3cd9978a
Parents: 58c4e12
Author: Thejas Nair <th...@hortonworks.com>
Authored: Fri Apr 15 12:18:26 2016 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Fri Apr 15 12:18:26 2016 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java    | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2d282919/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 857805a..10c8ff2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -64,10 +64,8 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 
-@Ignore("Disabling test until hanging issue is resolved.")
 public class TestJdbcWithMiniHS2 {
   private static MiniHS2 miniHS2 = null;
   private static String dataFileDir;


[16/58] [abbrv] hive git commit: HIVE-13420 : Clarify HS2 WebUI Query 'Elapsed Time' (Szehon, reviewed by Aihua Xu and Mohit Sabharwal)

Posted by jd...@apache.org.
HIVE-13420 : Clarify HS2 WebUI Query 'Elapsed Time' (Szehon, reviewed by Aihua Xu and Mohit Sabharwal)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/010157e9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/010157e9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/010157e9

Branch: refs/heads/llap
Commit: 010157e9ab16601b72431ad68efb763ea11c170c
Parents: 0ebd4d1
Author: Szehon Ho <sz...@cloudera.com>
Authored: Sun Apr 10 01:07:44 2016 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Sun Apr 10 01:07:44 2016 -0700

----------------------------------------------------------------------
 .../service/cli/session/TestQueryDisplay.java   |  2 ++
 .../org/apache/hive/tmpl/QueryProfileTmpl.jamon | 16 ++++++++------
 .../hive/service/cli/operation/Operation.java   |  2 +-
 .../service/cli/operation/SQLOperation.java     |  5 +++++
 .../cli/operation/SQLOperationDisplay.java      |  9 ++++++++
 .../hive-webapps/hiveserver2/hiveserver2.jsp    | 22 ++++++++++++--------
 6 files changed, 40 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/010157e9/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
index 418f71e..98581e0 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
@@ -133,8 +133,10 @@ public class TestQueryDisplay {
     if (finished) {
       Assert.assertTrue(display.getEndTime() > 0 && display.getEndTime() >= display.getBeginTime()
         && display.getEndTime() <= System.currentTimeMillis());
+      Assert.assertTrue(display.getRuntime() > 0);
     } else {
       Assert.assertNull(display.getEndTime());
+      //For runtime, query may have finished.
     }
 
     QueryDisplay qDisplay1 = display.getQueryDisplay();

http://git-wip-us.apache.org/repos/asf/hive/blob/010157e9/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
----------------------------------------------------------------------
diff --git a/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon b/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
index 8d51a73..690c6f3 100644
--- a/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
+++ b/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
@@ -129,7 +129,7 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
             <td><% sod.getQueryDisplay() == null ? "Unknown" : sod.getQueryDisplay().getQueryString() %></td>
         </tr>
         <tr>
-            <td>Query Id</td>
+            <td>Id</td>
             <td><% sod.getQueryDisplay() == null ? "Unknown" : sod.getQueryDisplay().getQueryId() %></td>
         </tr>
         <tr>
@@ -141,23 +141,27 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
             <td><% sod.getState() %></td>
         </tr>
         <tr>
-            <td>Begin Time</td>
+            <td>Opened Timestamp</td>
             <td><% new Date(sod.getBeginTime()) %></td>
         </tr>
         <tr>
-            <td>Elapsed Time (s)</td>
+            <td>Opened (s)</td>
             <td><% sod.getElapsedTime()/1000 %></td>
         </tr>
         <tr>
-            <td>End Time</td>
-            <td><% sod.getEndTime() == null ? "In Progress" : new Date(sod.getEndTime()) %></td>
+            <td>Closed Timestamp</td>
+            <td><% sod.getEndTime() == null ? "Open" : new Date(sod.getEndTime()) %></td>
         </tr>
         <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getErrorMessage() != null %>
             <tr>
                 <td>Error</td>
-                <td><% sod.getEndTime() == null ? "In Progress" : new Date(sod.getEndTime()) %></td>
+                <td><% sod.getQueryDisplay().getErrorMessage() %></td>
             </tr>
         </%if>
+        <tr>
+            <td>Latency (s)</td>
+            <td><% sod.getRuntime()/1000 %></td>
+        </tr>
     </table>
 </%def>
 

http://git-wip-us.apache.org/repos/asf/hive/blob/010157e9/service/src/java/org/apache/hive/service/cli/operation/Operation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/Operation.java b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
index d9a273b..b7d6549 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/Operation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
@@ -446,7 +446,7 @@ public abstract class Operation {
   protected void onNewState(OperationState state, OperationState prevState) {
     switch(state) {
       case RUNNING:
-      markOperationStartTime();
+        markOperationStartTime();
         break;
       case ERROR:
       case FINISHED:

http://git-wip-us.apache.org/repos/asf/hive/blob/010157e9/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index 04d816a..9ce6055 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -556,6 +556,11 @@ public class SQLOperation extends ExecuteStatementOperation {
       }
     }
 
+    if (state == OperationState.FINISHED || state == OperationState.CANCELED || state == OperationState.ERROR) {
+      //update runtime
+      sqlOpDisplay.setRuntime(getOperationComplete() - getOperationStart());
+    }
+
     if (state == OperationState.CLOSED) {
       sqlOpDisplay.closed();
     } else {

http://git-wip-us.apache.org/repos/asf/hive/blob/010157e9/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java
index d2ca1e7..fe93426 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperationDisplay.java
@@ -32,6 +32,7 @@ public class SQLOperationDisplay {
   public final String executionEngine;
   public final long beginTime;
   public final String operationId;
+  public Long runtime;  //tracks only running portion of the query.
 
   public Long endTime;
   public OperationState state;
@@ -96,4 +97,12 @@ public class SQLOperationDisplay {
   public synchronized void closed() {
     this.endTime = System.currentTimeMillis();
   }
+
+  public synchronized void setRuntime(long runtime) {
+    this.runtime = runtime;
+  }
+
+  public synchronized Long getRuntime() {
+    return runtime;
+  }
 }
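
For contrast, a minimal sketch (names taken from the classes touched by this patch, details elided) of what the two WebUI columns now mean: "Opened (s)" keeps growing for as long as the operation is open, while the new "Latency (s)" is recorded once, from the running portion only, when the query reaches FINISHED, CANCELED or ERROR:

    // Hedged sketch, not the full implementation -- in SQLOperation.onNewState(...):
    if (state == OperationState.FINISHED || state == OperationState.CANCELED
        || state == OperationState.ERROR) {
      sqlOpDisplay.setRuntime(getOperationComplete() - getOperationStart());
    }
    // The WebUI then renders:
    //   Opened (s)  -> operation.getElapsedTime() / 1000   (wall clock since the query was opened)
    //   Latency (s) -> operation.getRuntime() / 1000       (null, shown as "Not finished", until completion)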

http://git-wip-us.apache.org/repos/asf/hive/blob/010157e9/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
----------------------------------------------------------------------
diff --git a/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp b/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
index 8b46550..293a8ef 100644
--- a/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
+++ b/service/src/resources/hive-webapps/hiveserver2/hiveserver2.jsp
@@ -125,15 +125,16 @@ for (HiveSession hiveSession: hiveSessions) {
 </section>
 
 <section>
-<h2>Queries</h2>
+<h2>Open Queries</h2>
 <table id="attributes_table" class="table table-striped">
     <tr>
         <th>User Name</th>
         <th>Query</th>
         <th>Execution Engine</th>
         <th>State</th>
-        <th>Begin Time</th>
-        <th>Elapsed Time (s)</th>
+        <th>Opened Timestamp</th>
+        <th>Opened (s)</th>
+        <th>Latency (s)</th>
         <th>Drilldown Link</th>
     </tr>
     <%
@@ -149,30 +150,32 @@ for (HiveSession hiveSession: hiveSessions) {
         <td><%= operation.getState() %></td>
         <td><%= new Date(operation.getBeginTime()) %></td>
         <td><%= operation.getElapsedTime()/1000 %></td>
+        <td><%= operation.getRuntime() == null ? "Not finished" : operation.getRuntime()/1000 %></td>
         <% String link = "/query_page?operationId=" + operation.getOperationId(); %>
-        <td>  <a href= <%= link %>>Query Drilldown</a> </td>
+        <td>  <a href= <%= link %>>Drilldown</a> </td>
     </tr>
 
 <%
   }
 %>
 <tr>
-  <td colspan="7">Total number of queries: <%= queries %></td>
+  <td colspan="8">Total number of queries: <%= queries %></td>
 </tr>
 </table>
 </section>
 
 
 <section>
-<h2>Last Max <%= conf.get(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES.varname) %> Completed Queries</h2>
+<h2>Last Max <%= conf.get(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES.varname) %> Closed Queries</h2>
 <table id="attributes_table" class="table table-striped">
     <tr>
         <th>User Name</th>
         <th>Query</th>
         <th>Execution Engine</th>
         <th>State</th>
-        <th>Elapsed Time (s)</th>
-        <th>End Time</th>
+        <th>Opened (s)</th>
+        <th>Closed Timestamp</th>
+        <th>Latency (s)</th>
         <th>Drilldown Link</th>
     </tr>
     <%
@@ -188,8 +191,9 @@ for (HiveSession hiveSession: hiveSessions) {
         <td><%= operation.getState() %></td>
         <td><%= operation.getElapsedTime()/1000 %></td>
         <td><%= operation.getEndTime() == null ? "In Progress" : new Date(operation.getEndTime()) %></td>
+        <td><%= operation.getRuntime()/1000 %></td>
         <% String link = "/query_page?operationId=" + operation.getOperationId(); %>
-        <td>  <a href= <%= link %>>Query Drilldown</a> </td>
+        <td>  <a href= <%= link %>>Drilldown</a> </td>
     </tr>
 
 <%


[30/58] [abbrv] hive git commit: HIVE-13492 : TestMiniSparkOnYarnCliDriver.testCliDriver_index_bitmap3 is failing on master (Ashutosh Chauhan via Szehon Ho)

Posted by jd...@apache.org.
HIVE-13492 : TestMiniSparkOnYarnCliDriver.testCliDriver_index_bitmap3 is failing on master (Ashutosh Chauhan via Szehon Ho)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/94c19741
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/94c19741
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/94c19741

Branch: refs/heads/llap
Commit: 94c19741bd767880e8ab1ae3232e6ba06232e443
Parents: b507520
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Tue Apr 12 15:47:19 2016 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue Apr 12 18:11:06 2016 -0700

----------------------------------------------------------------------
 .../clientpositive/spark/index_bitmap3.q.out    | 34 ++++++++++----------
 1 file changed, 17 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/94c19741/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out b/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
index d0ed328..b77966d 100644
--- a/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
+++ b/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
@@ -110,45 +110,45 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
-        Reducer 3 <- Reducer 2 (GROUP, 2)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Map 4 (PARTITION-LEVEL SORT, 4)
+        Reducer 3 <- Reducer 2 (GROUP, 4)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
                   alias: default__src_src1_index__
-                  Statistics: Num rows: 500 Data size: 56811 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 28405 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 28405 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: bigint)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                        Statistics: Num rows: 250 Data size: 28405 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: array<bigint>)
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: default__src_src2_index__
-                  Statistics: Num rows: 500 Data size: 58811 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 29405 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 29405 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: bigint)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                        Statistics: Num rows: 250 Data size: 29405 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: array<bigint>)
         Reducer 2 
             Reduce Operator Tree:
@@ -159,25 +159,25 @@ STAGE PLANS:
                   0 _col0 (type: string), _col1 (type: bigint)
                   1 _col0 (type: string), _col1 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col5
-                Statistics: Num rows: 275 Data size: 31245 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 275 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (not EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(_col2,_col5))) (type: boolean)
-                  Statistics: Num rows: 138 Data size: 15679 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 138 Data size: 15679 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: collect_set(_col1)
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 138 Data size: 15679 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 138 Data size: 15679 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: array<bigint>)
         Reducer 3 
             Reduce Operator Tree:
@@ -186,10 +186,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 69 Data size: 7839 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 69 Data size: 7839 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat


[41/58] [abbrv] hive git commit: HIVE-12159: Create vectorized readers for the complex types (Owen O'Malley, reviewed by Matt McCline)

Posted by jd...@apache.org.
HIVE-12159: Create vectorized readers for the complex types (Owen O'Malley, reviewed by Matt McCline)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0dd4621f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0dd4621f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0dd4621f

Branch: refs/heads/llap
Commit: 0dd4621f34f6043071474220a082268cda124b9d
Parents: 529580f
Author: Matt McCline <mm...@hortonworks.com>
Authored: Wed Apr 13 22:39:10 2016 -0700
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Wed Apr 13 22:39:10 2016 -0700

----------------------------------------------------------------------
 .../llap/io/decode/OrcEncodedDataConsumer.java  |   45 +-
 orc/src/java/org/apache/orc/OrcUtils.java       |   75 +
 orc/src/java/org/apache/orc/Reader.java         |    6 +
 orc/src/java/org/apache/orc/RecordReader.java   |    8 +-
 .../java/org/apache/orc/TypeDescription.java    |   62 +-
 .../org/apache/orc/impl/BitFieldReader.java     |    5 +-
 .../java/org/apache/orc/impl/IntegerReader.java |   26 +-
 .../apache/orc/impl/RunLengthByteReader.java    |   36 +-
 .../apache/orc/impl/RunLengthIntegerReader.java |   31 +-
 .../orc/impl/RunLengthIntegerReaderV2.java      |   33 +-
 .../java/org/apache/orc/impl/WriterImpl.java    |   47 +-
 .../ql/exec/vector/VectorizedRowBatchCtx.java   |   13 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |   43 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java      |    3 +-
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |   12 +-
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |   50 +-
 .../hadoop/hive/ql/io/orc/SchemaEvolution.java  |  234 +--
 .../hive/ql/io/orc/TreeReaderFactory.java       |  838 +++++----
 .../ql/io/orc/VectorizedOrcInputFormat.java     |   32 +-
 .../hadoop/hive/ql/io/orc/WriterImpl.java       |    2 -
 .../hive/ql/io/orc/TestTypeDescription.java     |    4 +-
 .../hive/ql/io/orc/TestVectorOrcFile.java       | 1634 +++++++++---------
 .../hive/ql/io/orc/TestVectorizedORCReader.java |    7 +-
 .../hive/ql/exec/vector/BytesColumnVector.java  |   11 +
 .../ql/exec/vector/TimestampColumnVector.java   |    2 +-
 .../hive/ql/exec/vector/UnionColumnVector.java  |    2 -
 26 files changed, 1785 insertions(+), 1476 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
index 7ee263d..baaa4d7 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.llap.io.decode;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch;
 import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
@@ -27,7 +28,12 @@ import org.apache.hadoop.hive.llap.io.api.impl.ColumnVectorBatch;
 import org.apache.hadoop.hive.llap.io.metadata.OrcFileMetadata;
 import org.apache.hadoop.hive.llap.io.metadata.OrcStripeMetadata;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueMetrics;
+import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.orc.CompressionCodec;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
@@ -71,6 +77,35 @@ public class OrcEncodedDataConsumer
     stripes[m.getStripeIx()] = m;
   }
 
+  private static ColumnVector createColumn(OrcProto.Type type,
+                                           int batchSize) {
+    switch (type.getKind()) {
+      case BOOLEAN:
+      case BYTE:
+      case SHORT:
+      case INT:
+      case LONG:
+      case DATE:
+        return new LongColumnVector(batchSize);
+      case FLOAT:
+      case DOUBLE:
+        return new DoubleColumnVector(batchSize);
+      case BINARY:
+      case STRING:
+      case CHAR:
+      case VARCHAR:
+        return new BytesColumnVector(batchSize);
+      case TIMESTAMP:
+        return new TimestampColumnVector(batchSize);
+      case DECIMAL:
+        return new DecimalColumnVector(batchSize, type.getPrecision(),
+            type.getScale());
+      default:
+        throw new IllegalArgumentException("LLAP does not support " +
+            type.getKind());
+    }
+  }
+
   @Override
   protected void decodeBatch(OrcEncodedColumnBatch batch,
       Consumer<ColumnVectorBatch> downstreamConsumer) {
@@ -112,9 +147,15 @@ public class OrcEncodedDataConsumer
         ColumnVectorBatch cvb = cvbPool.take();
         assert cvb.cols.length == batch.getColumnIxs().length; // Must be constant per split.
         cvb.size = batchSize;
-
+        List<OrcProto.Type> types = fileMetadata.getTypes();
+        int[] columnMapping = batch.getColumnIxs();
         for (int idx = 0; idx < batch.getColumnIxs().length; idx++) {
-          cvb.cols[idx] = (ColumnVector)columnReaders[idx].nextVector(cvb.cols[idx], batchSize);
+          if (cvb.cols[idx] == null) {
+            // skip over the top level struct, but otherwise assume no complex
+            // types
+            cvb.cols[idx] = createColumn(types.get(columnMapping[idx]), batchSize);
+          }
+          columnReaders[idx].nextVector(cvb.cols[idx], null, batchSize);
         }
 
         // we are done reading a batch, send it to consumer for processing

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/OrcUtils.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/OrcUtils.java b/orc/src/java/org/apache/orc/OrcUtils.java
index 2e93254..2ebe9a7 100644
--- a/orc/src/java/org/apache/orc/OrcUtils.java
+++ b/orc/src/java/org/apache/orc/OrcUtils.java
@@ -449,4 +449,79 @@ public class OrcUtils {
     return columnId;
   }
 
+  /**
+   * Translate the given rootColumn from the list of types to a TypeDescription.
+   * @param types all of the types
+   * @param rootColumn translate this type
+   * @return a new TypeDescription that matches the given rootColumn
+   */
+  public static
+        TypeDescription convertTypeFromProtobuf(List<OrcProto.Type> types,
+                                                int rootColumn) {
+    OrcProto.Type type = types.get(rootColumn);
+    switch (type.getKind()) {
+      case BOOLEAN:
+        return TypeDescription.createBoolean();
+      case BYTE:
+        return TypeDescription.createByte();
+      case SHORT:
+        return TypeDescription.createShort();
+      case INT:
+        return TypeDescription.createInt();
+      case LONG:
+        return TypeDescription.createLong();
+      case FLOAT:
+        return TypeDescription.createFloat();
+      case DOUBLE:
+        return TypeDescription.createDouble();
+      case STRING:
+        return TypeDescription.createString();
+      case CHAR:
+        return TypeDescription.createChar()
+            .withMaxLength(type.getMaximumLength());
+      case VARCHAR:
+        return TypeDescription.createVarchar()
+            .withMaxLength(type.getMaximumLength());
+      case BINARY:
+        return TypeDescription.createBinary();
+      case TIMESTAMP:
+        return TypeDescription.createTimestamp();
+      case DATE:
+        return TypeDescription.createDate();
+      case DECIMAL: {
+        TypeDescription result = TypeDescription.createDecimal();
+        if (type.hasScale()) {
+          result.withScale(type.getScale());
+        }
+        if (type.hasPrecision()) {
+          result.withPrecision(type.getPrecision());
+        }
+        return result;
+      }
+      case LIST:
+        return TypeDescription.createList(
+            convertTypeFromProtobuf(types, type.getSubtypes(0)));
+      case MAP:
+        return TypeDescription.createMap(
+            convertTypeFromProtobuf(types, type.getSubtypes(0)),
+            convertTypeFromProtobuf(types, type.getSubtypes(1)));
+      case STRUCT: {
+        TypeDescription result = TypeDescription.createStruct();
+        for(int f=0; f < type.getSubtypesCount(); ++f) {
+          result.addField(type.getFieldNames(f),
+              convertTypeFromProtobuf(types, type.getSubtypes(f)));
+        }
+        return result;
+      }
+      case UNION: {
+        TypeDescription result = TypeDescription.createUnion();
+        for(int f=0; f < type.getSubtypesCount(); ++f) {
+          result.addUnionChild(
+              convertTypeFromProtobuf(types, type.getSubtypes(f)));
+        }
+        return result;
+      }
+    }
+    throw new IllegalArgumentException("Unknown ORC type " + type.getKind());
+  }
 }
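
A hedged usage sketch for the new helper (the same call ReaderImpl makes later in this series), with reader an org.apache.orc.Reader; both paths should yield the same root schema:

    // Sketch only:
    List<OrcProto.Type> types = reader.getTypes();                            // flattened form, now deprecated
    TypeDescription fromTypes = OrcUtils.convertTypeFromProtobuf(types, 0);   // column 0 is the root
    TypeDescription fromReader = reader.getSchema();                          // new API, added below
    assert fromTypes.equals(fromReader);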

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/Reader.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/Reader.java b/orc/src/java/org/apache/orc/Reader.java
index be722b5..62a05e9 100644
--- a/orc/src/java/org/apache/orc/Reader.java
+++ b/orc/src/java/org/apache/orc/Reader.java
@@ -116,9 +116,15 @@ public interface Reader {
   ColumnStatistics[] getStatistics();
 
   /**
+   * Get the type of rows in this ORC file.
+   */
+  TypeDescription getSchema();
+
+  /**
    * Get the list of types contained in the file. The root type is the first
    * type in the list.
    * @return the list of flattened types
+   * @deprecated use getSchema instead
    */
   List<OrcProto.Type> getTypes();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/RecordReader.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/RecordReader.java b/orc/src/java/org/apache/orc/RecordReader.java
index 7229dda..09ba0f0 100644
--- a/orc/src/java/org/apache/orc/RecordReader.java
+++ b/orc/src/java/org/apache/orc/RecordReader.java
@@ -30,13 +30,11 @@ public interface RecordReader {
    * controlled by the callers. Callers need to look at
    * VectorizedRowBatch.size of the returned object to know the batch
    * size read.
-   * @param previousBatch a row batch object that can be reused by the reader
-   * @return the row batch that was read. The batch will have a non-zero row
-   *         count if the pointer isn't at the end of the file
+   * @param batch a row batch object to read into
+   * @return were more rows available to read?
    * @throws java.io.IOException
    */
-  VectorizedRowBatch nextBatch(VectorizedRowBatch previousBatch
-			       ) throws IOException;
+  boolean nextBatch(VectorizedRowBatch batch) throws IOException;
 
   /**
    * Get the row number of the row that will be returned by the following
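
Because nextBatch now fills a caller-owned batch and reports whether anything was read, the consuming loop changes shape. A minimal sketch of the new pattern, assuming TypeDescription.createRowBatch() is used to allocate a batch that matches the reader's schema (types are illustrative):

    // Old style: batch = rows.nextBatch(batch); stop when batch.size == 0.
    // New style with this patch:
    VectorizedRowBatch batch = reader.getSchema().createRowBatch();
    RecordReader rows = reader.rows();
    while (rows.nextBatch(batch)) {
      for (int r = 0; r < batch.size; ++r) {
        // process row r of the current batch
      }
    }
    rows.close();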

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/TypeDescription.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/TypeDescription.java b/orc/src/java/org/apache/orc/TypeDescription.java
index bd900ac..b8e057e 100644
--- a/orc/src/java/org/apache/orc/TypeDescription.java
+++ b/orc/src/java/org/apache/orc/TypeDescription.java
@@ -61,7 +61,7 @@ public class TypeDescription {
     LIST("array", false),
     MAP("map", false),
     STRUCT("struct", false),
-    UNION("union", false);
+    UNION("uniontype", false);
 
     Category(String name, boolean isPrimitive) {
       this.name = name;
@@ -258,6 +258,66 @@ public class TypeDescription {
     return id;
   }
 
+  public TypeDescription clone() {
+    TypeDescription result = new TypeDescription(category);
+    result.maxLength = maxLength;
+    result.precision = precision;
+    result.scale = scale;
+    if (fieldNames != null) {
+      result.fieldNames.addAll(fieldNames);
+    }
+    if (children != null) {
+      for(TypeDescription child: children) {
+        TypeDescription clone = child.clone();
+        clone.parent = result;
+        result.children.add(clone);
+      }
+    }
+    return result;
+  }
+
+  @Override
+  public int hashCode() {
+    return getId();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null || other.getClass() != TypeDescription.class) {
+      return false;
+    }
+    if (other == this) {
+      return true;
+    }
+    TypeDescription castOther = (TypeDescription) other;
+    if (category != castOther.category ||
+        getId() != castOther.getId() ||
+        getMaximumId() != castOther.getMaximumId() ||
+        maxLength != castOther.maxLength ||
+        scale != castOther.scale ||
+        precision != castOther.precision) {
+      return false;
+    }
+    if (children != null) {
+      if (children.size() != castOther.children.size()) {
+        return false;
+      }
+      for (int i = 0; i < children.size(); ++i) {
+        if (!children.get(i).equals(castOther.children.get(i))) {
+          return false;
+        }
+      }
+    }
+    if (category == Category.STRUCT) {
+      for(int i=0; i < fieldNames.size(); ++i) {
+        if (!fieldNames.get(i).equals(castOther.fieldNames.get(i))) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
   /**
    * Get the maximum id assigned to this type or its children.
    * The first call will cause all of the ids in the tree to be assigned, so
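
A small hedged sketch of what the new clone()/equals()/hashCode() provide: schemas built independently but with the same structure now compare equal.

    // Sketch only:
    TypeDescription schema = TypeDescription.createStruct()
        .addField("id", TypeDescription.createLong())
        .addField("name", TypeDescription.createString());
    TypeDescription copy = schema.clone();
    assert copy.equals(schema);                  // same category tree, ids and field names
    assert copy.hashCode() == schema.hashCode(); // hashCode is the assigned column id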

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/impl/BitFieldReader.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/BitFieldReader.java b/orc/src/java/org/apache/orc/impl/BitFieldReader.java
index 8d9d3cb..dda7355 100644
--- a/orc/src/java/org/apache/orc/impl/BitFieldReader.java
+++ b/orc/src/java/org/apache/orc/impl/BitFieldReader.java
@@ -137,7 +137,7 @@ public class BitFieldReader {
                          long previousLen) throws IOException {
     previous.isRepeating = true;
     for (int i = 0; i < previousLen; i++) {
-      if (!previous.isNull[i]) {
+      if (previous.noNulls || !previous.isNull[i]) {
         previous.vector[i] = next();
       } else {
         // The default value of null for int types in vectorized
@@ -150,7 +150,8 @@ public class BitFieldReader {
       // when determining the isRepeating flag.
       if (previous.isRepeating
           && i > 0
-          && ((previous.vector[i - 1] != previous.vector[i]) || (previous.isNull[i - 1] != previous.isNull[i]))) {
+          && ((previous.vector[0] != previous.vector[i]) ||
+          (previous.isNull[0] != previous.isNull[i]))) {
         previous.isRepeating = false;
       }
     }
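
The repeating-value check in these readers now compares every slot against slot 0 (value and null flag) instead of against its immediate neighbor; the idea in isolation, as a hedged sketch:

    // Sketch: a vector is flagged repeating only when every slot matches slot 0.
    static boolean allMatchFirst(long[] vector, boolean[] isNull, int n) {
      for (int i = 1; i < n; ++i) {
        if (vector[0] != vector[i] || isNull[0] != isNull[i]) {
          return false;
        }
      }
      return true;
    }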

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/impl/IntegerReader.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/IntegerReader.java b/orc/src/java/org/apache/orc/impl/IntegerReader.java
index 7dfd289..8bef0f1 100644
--- a/orc/src/java/org/apache/orc/impl/IntegerReader.java
+++ b/orc/src/java/org/apache/orc/impl/IntegerReader.java
@@ -20,7 +20,7 @@ package org.apache.orc.impl;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 
 /**
  * Interface for reading integers.
@@ -57,9 +57,25 @@ public interface IntegerReader {
 
   /**
    * Return the next available vector for values.
-   * @return
+   * @param column the column being read
+   * @param data the vector to read into
+   * @param length the number of numbers to read
+   * @throws IOException
+   */
+   void nextVector(ColumnVector column,
+                   long[] data,
+                   int length
+                   ) throws IOException;
+
+  /**
+   * Return the next available vector for values. Does not change the
+   * value of column.isRepeating.
+   * @param column the column being read
+   * @param data the vector to read into
+   * @param length the number of numbers to read
    * @throws IOException
    */
-   void nextVector(LongColumnVector previous, final int previousLen)
-      throws IOException;
-}
+  void nextVector(ColumnVector column,
+                  int[] data,
+                  int length
+                  ) throws IOException;}

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/impl/RunLengthByteReader.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/RunLengthByteReader.java b/orc/src/java/org/apache/orc/impl/RunLengthByteReader.java
index 380f3391..24bd051 100644
--- a/orc/src/java/org/apache/orc/impl/RunLengthByteReader.java
+++ b/orc/src/java/org/apache/orc/impl/RunLengthByteReader.java
@@ -20,7 +20,7 @@ package org.apache.orc.impl;
 import java.io.EOFException;
 import java.io.IOException;
 
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 
 /**
  * A reader that reads a sequence of bytes. A control byte is read before
@@ -92,16 +92,16 @@ public class RunLengthByteReader {
     return result;
   }
 
-  public void nextVector(LongColumnVector previous, long previousLen)
+  public void nextVector(ColumnVector previous, long[] data, long size)
       throws IOException {
     previous.isRepeating = true;
-    for (int i = 0; i < previousLen; i++) {
+    for (int i = 0; i < size; i++) {
       if (!previous.isNull[i]) {
-        previous.vector[i] = next();
+        data[i] = next();
       } else {
         // The default value of null for int types in vectorized
         // processing is 1, so set that if the value is null
-        previous.vector[i] = 1;
+        data[i] = 1;
       }
 
       // The default value for nulls in Vectorization for int types is 1
@@ -109,12 +109,36 @@ public class RunLengthByteReader {
       // when determining the isRepeating flag.
       if (previous.isRepeating
           && i > 0
-          && ((previous.vector[i - 1] != previous.vector[i]) || (previous.isNull[i - 1] != previous.isNull[i]))) {
+          && ((data[0] != data[i]) ||
+              (previous.isNull[0] != previous.isNull[i]))) {
         previous.isRepeating = false;
       }
     }
   }
 
+  /**
+   * Read the next size bytes into the data array, skipping over any slots
+   * where isNull is true.
+   * @param isNull if non-null, skip any rows where isNull[r] is true
+   * @param data the array to read into
+   * @param size the number of elements to read
+   * @throws IOException
+   */
+  public void nextVector(boolean[] isNull, int[] data,
+                         long size) throws IOException {
+    if (isNull == null) {
+      for(int i=0; i < size; ++i) {
+        data[i] = next();
+      }
+    } else {
+      for(int i=0; i < size; ++i) {
+        if (!isNull[i]) {
+          data[i] = next();
+        }
+      }
+    }
+  }
+
   public void seek(PositionProvider index) throws IOException {
     input.seek(index);
     int consumed = (int) index.getNext();

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java b/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
index 0c90cde..b91a263 100644
--- a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
+++ b/orc/src/java/org/apache/orc/impl/RunLengthIntegerReader.java
@@ -20,7 +20,7 @@ package org.apache.orc.impl;
 import java.io.EOFException;
 import java.io.IOException;
 
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 
 /**
  * A reader that reads a sequence of integers.
@@ -99,15 +99,17 @@ public class RunLengthIntegerReader implements IntegerReader {
   }
 
   @Override
-  public void nextVector(LongColumnVector previous, final int previousLen) throws IOException {
+  public void nextVector(ColumnVector previous,
+                         long[] data,
+                         int previousLen) throws IOException {
     previous.isRepeating = true;
     for (int i = 0; i < previousLen; i++) {
       if (!previous.isNull[i]) {
-        previous.vector[i] = next();
+        data[i] = next();
       } else {
         // The default value of null for int type in vectorized
         // processing is 1, so set that if the value is null
-        previous.vector[i] = 1;
+        data[i] = 1;
       }
 
       // The default value for nulls in Vectorization for int types is 1
@@ -115,13 +117,32 @@ public class RunLengthIntegerReader implements IntegerReader {
       // when determining the isRepeating flag.
       if (previous.isRepeating
           && i > 0
-          && (previous.vector[i - 1] != previous.vector[i] || previous.isNull[i - 1] != previous.isNull[i])) {
+          && (data[0] != data[i] || previous.isNull[0] != previous.isNull[i])) {
         previous.isRepeating = false;
       }
     }
   }
 
   @Override
+  public void nextVector(ColumnVector vector,
+                         int[] data,
+                         int size) throws IOException {
+    if (vector.noNulls) {
+      for(int r=0; r < data.length && r < size; ++r) {
+        data[r] = (int) next();
+      }
+    } else if (!(vector.isRepeating && vector.isNull[0])) {
+      for(int r=0; r < data.length && r < size; ++r) {
+        if (!vector.isNull[r]) {
+          data[r] = (int) next();
+        } else {
+          data[r] = 1;
+        }
+      }
+    }
+  }
+
+  @Override
   public void seek(PositionProvider index) throws IOException {
     input.seek(index);
     int consumed = (int) index.getNext();

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java b/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
index c6d685a..610d9b5 100644
--- a/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
+++ b/orc/src/java/org/apache/orc/impl/RunLengthIntegerReaderV2.java
@@ -21,9 +21,9 @@ import java.io.EOFException;
 import java.io.IOException;
 import java.util.Arrays;
 
+import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 
 /**
  * A reader that reads a sequence of light weight compressed integers. Refer
@@ -360,15 +360,17 @@ public class RunLengthIntegerReaderV2 implements IntegerReader {
   }
 
   @Override
-  public void nextVector(LongColumnVector previous, final int previousLen) throws IOException {
+  public void nextVector(ColumnVector previous,
+                         long[] data,
+                         int previousLen) throws IOException {
     previous.isRepeating = true;
     for (int i = 0; i < previousLen; i++) {
       if (!previous.isNull[i]) {
-        previous.vector[i] = next();
+        data[i] = next();
       } else {
         // The default value of null for int type in vectorized
         // processing is 1, so set that if the value is null
-        previous.vector[i] = 1;
+        data[i] = 1;
       }
 
       // The default value for nulls in Vectorization for int types is 1
@@ -376,10 +378,29 @@ public class RunLengthIntegerReaderV2 implements IntegerReader {
       // when determining the isRepeating flag.
       if (previous.isRepeating
           && i > 0
-          && (previous.vector[i - 1] != previous.vector[i] ||
-          previous.isNull[i - 1] != previous.isNull[i])) {
+          && (data[0] != data[i] ||
+          previous.isNull[0] != previous.isNull[i])) {
         previous.isRepeating = false;
       }
     }
   }
+
+  @Override
+  public void nextVector(ColumnVector vector,
+                         int[] data,
+                         int size) throws IOException {
+    if (vector.noNulls) {
+      for(int r=0; r < data.length && r < size; ++r) {
+        data[r] = (int) next();
+      }
+    } else if (!(vector.isRepeating && vector.isNull[0])) {
+      for(int r=0; r < data.length && r < size; ++r) {
+        if (!vector.isNull[r]) {
+          data[r] = (int) next();
+        } else {
+          data[r] = 1;
+        }
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/orc/src/java/org/apache/orc/impl/WriterImpl.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/WriterImpl.java b/orc/src/java/org/apache/orc/impl/WriterImpl.java
index f8afe06..b2966e0 100644
--- a/orc/src/java/org/apache/orc/impl/WriterImpl.java
+++ b/orc/src/java/org/apache/orc/impl/WriterImpl.java
@@ -1693,9 +1693,10 @@ public class WriterImpl implements Writer, MemoryManager.Callback {
     }
   }
 
+  public static long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;
+  public static long NANOS_PER_MILLI = 1000000;
   public static final int MILLIS_PER_SECOND = 1000;
   static final int NANOS_PER_SECOND = 1000000000;
-  static final int MILLIS_PER_NANO  = 1000000;
   public static final String BASE_TIMESTAMP_STRING = "2015-01-01 00:00:00";
 
   private static class TimestampTreeWriter extends TreeWriter {
@@ -2261,32 +2262,36 @@ public class WriterImpl implements Writer, MemoryManager.Callback {
         }
       } else {
         // write the records in runs of the same tag
-        byte prevTag = 0;
-        int currentRun = 0;
-        boolean started = false;
+        int[] currentStart = new int[vec.fields.length];
+        int[] currentLength = new int[vec.fields.length];
         for(int i=0; i < length; ++i) {
-          if (!vec.isNull[i + offset]) {
+          // only need to deal with the non-nulls, since the nulls were dealt
+          // with in the super method.
+          if (vec.noNulls || !vec.isNull[i + offset]) {
             byte tag = (byte) vec.tags[offset + i];
             tags.write(tag);
-            if (!started) {
-              started = true;
-              currentRun = i;
-              prevTag = tag;
-            } else if (tag != prevTag) {
-              childrenWriters[prevTag].writeBatch(vec.fields[prevTag],
-                  offset + currentRun, i - currentRun);
-              currentRun = i;
-              prevTag = tag;
+            if (currentLength[tag] == 0) {
+              // start a new sequence
+              currentStart[tag] = i + offset;
+              currentLength[tag] = 1;
+            } else if (currentStart[tag] + currentLength[tag] == i + offset) {
+              // ok, we are extending the current run for that tag.
+              currentLength[tag] += 1;
+            } else {
+              // otherwise, we need to close off the old run and start a new one
+              childrenWriters[tag].writeBatch(vec.fields[tag],
+                  currentStart[tag], currentLength[tag]);
+              currentStart[tag] = i + offset;
+              currentLength[tag] = 1;
             }
-          } else if (started) {
-            started = false;
-            childrenWriters[prevTag].writeBatch(vec.fields[prevTag],
-                offset + currentRun, i - currentRun);
           }
         }
-        if (started) {
-          childrenWriters[prevTag].writeBatch(vec.fields[prevTag],
-              offset + currentRun, length - currentRun);
+        // write out any left over sequences
+        for(int tag=0; tag < currentStart.length; ++tag) {
+          if (currentLength[tag] != 0) {
+            childrenWriters[tag].writeBatch(vec.fields[tag], currentStart[tag],
+                currentLength[tag]);
+          }
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
index 0724191..82a97e0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java
@@ -215,12 +215,9 @@ public class VectorizedRowBatchCtx {
     LOG.info("createVectorizedRowBatch columnsToIncludeTruncated " + Arrays.toString(columnsToIncludeTruncated));
     int totalColumnCount = rowColumnTypeInfos.length + scratchColumnTypeNames.length;
     VectorizedRowBatch result = new VectorizedRowBatch(totalColumnCount);
-
-    for (int i = 0; i < columnsToIncludeTruncated.length; i++) {
-      if (columnsToIncludeTruncated[i]) {
-        TypeInfo typeInfo = rowColumnTypeInfos[i];
-        result.cols[i] = VectorizedBatchUtil.createColumnVector(typeInfo);
-      }
+    for (int i = 0; i < dataColumnCount; i++) {
+      TypeInfo typeInfo = rowColumnTypeInfos[i];
+      result.cols[i] = VectorizedBatchUtil.createColumnVector(typeInfo);
     }
 
     for (int i = dataColumnCount; i < dataColumnCount + partitionColumnCount; i++) {
@@ -476,8 +473,8 @@ public class VectorizedRowBatchCtx {
             bcv.isNull[0] = true;
             bcv.isRepeating = true;
           } else {
-            bcv.fill(sVal.getBytes());
-            bcv.isNull[0] = false;
+            bcv.setVal(0, sVal.getBytes());
+            bcv.isRepeating = true;
           }
         }
         break;

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index fe0be7b..fcb8ca4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -301,7 +301,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     /**
      * Do we have schema on read in the configuration variables?
      */
-    TypeDescription schema = getDesiredRowTypeDescr(conf, /* isAcidRead */ false);
+    TypeDescription schema = getDesiredRowTypeDescr(conf, false, Integer.MAX_VALUE);
 
     Reader.Options options = new Reader.Options().range(offset, length);
     options.schema(schema);
@@ -1743,7 +1743,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     /**
      * Do we have schema on read in the configuration variables?
      */
-    TypeDescription schema = getDesiredRowTypeDescr(conf, /* isAcidRead */ true);
+    TypeDescription schema = getDesiredRowTypeDescr(conf, true, Integer.MAX_VALUE);
 
     final Reader reader;
     final int bucket;
@@ -1994,10 +1994,13 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
   /**
    * Convert a Hive type property string that contains separated type names into a list of
    * TypeDescription objects.
+   * @param hiveTypeProperty the desired types from hive
+   * @param maxColumns the maximum number of desired columns
    * @return the list of TypeDescription objects.
    */
-  public static ArrayList<TypeDescription> typeDescriptionsFromHiveTypeProperty(
-      String hiveTypeProperty) {
+  public static ArrayList<TypeDescription>
+      typeDescriptionsFromHiveTypeProperty(String hiveTypeProperty,
+                                           int maxColumns) {
 
     // CONSIDER: We need a type name parser for TypeDescription.
 
@@ -2005,6 +2008,9 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     ArrayList<TypeDescription> typeDescrList =new ArrayList<TypeDescription>(typeInfoList.size());
     for (TypeInfo typeInfo : typeInfoList) {
       typeDescrList.add(convertTypeInfo(typeInfo));
+      if (typeDescrList.size() >= maxColumns) {
+        break;
+      }
     }
     return typeDescrList;
   }
@@ -2091,8 +2097,18 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     }
   }
 
-  public static TypeDescription getDesiredRowTypeDescr(Configuration conf, boolean isAcidRead)
-      throws IOException {
+  /**
+   * Generate the desired schema for reading the file.
+   * @param conf the configuration
+   * @param isAcidRead is this an acid format?
+   * @param dataColumns the desired number of data columns for vectorized read
+   * @return the desired schema or null if schema evolution isn't enabled
+   * @throws IOException
+   */
+  public static TypeDescription getDesiredRowTypeDescr(Configuration conf,
+                                                       boolean isAcidRead,
+                                                       int dataColumns
+                                                       ) throws IOException {
 
     String columnNameProperty = null;
     String columnTypeProperty = null;
@@ -2115,8 +2131,10 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
           haveSchemaEvolutionProperties = false;
         } else {
           schemaEvolutionTypeDescrs =
-              typeDescriptionsFromHiveTypeProperty(columnTypeProperty);
-          if (schemaEvolutionTypeDescrs.size() != schemaEvolutionColumnNames.size()) {
+              typeDescriptionsFromHiveTypeProperty(columnTypeProperty,
+                  dataColumns);
+          if (schemaEvolutionTypeDescrs.size() !=
+              Math.min(dataColumns, schemaEvolutionColumnNames.size())) {
             haveSchemaEvolutionProperties = false;
           }
         }
@@ -2147,8 +2165,9 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         return null;
       }
       schemaEvolutionTypeDescrs =
-          typeDescriptionsFromHiveTypeProperty(columnTypeProperty);
-      if (schemaEvolutionTypeDescrs.size() != schemaEvolutionColumnNames.size()) {
+          typeDescriptionsFromHiveTypeProperty(columnTypeProperty, dataColumns);
+      if (schemaEvolutionTypeDescrs.size() !=
+          Math.min(dataColumns, schemaEvolutionColumnNames.size())) {
         return null;
       }
 
@@ -2162,7 +2181,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         }
         columnNum++;
       }
-      if (virtualColumnClipNum != -1) {
+      if (virtualColumnClipNum != -1 && virtualColumnClipNum < dataColumns) {
         schemaEvolutionColumnNames =
             Lists.newArrayList(schemaEvolutionColumnNames.subList(0, virtualColumnClipNum));
         schemaEvolutionTypeDescrs = Lists.newArrayList(schemaEvolutionTypeDescrs.subList(0, virtualColumnClipNum));
@@ -2179,7 +2198,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
 
     // Desired schema does not include virtual columns or partition columns.
     TypeDescription result = TypeDescription.createStruct();
-    for (int i = 0; i < schemaEvolutionColumnNames.size(); i++) {
+    for (int i = 0; i < schemaEvolutionTypeDescrs.size(); i++) {
       result.addField(schemaEvolutionColumnNames.get(i), schemaEvolutionTypeDescrs.get(i));
     }
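
A hedged usage sketch of the widened signature: callers that want the whole schema pass Integer.MAX_VALUE (as the calls above and OrcRawRecordMerger below do), while a vectorized read can cap the number of columns; dataColumns here is illustrative, e.g. batch.getDataColumnCount():

    // Full schema, ACID read (mirrors the calls in this patch):
    TypeDescription acidSchema =
        OrcInputFormat.getDesiredRowTypeDescr(conf, true, Integer.MAX_VALUE);

    // Vectorized, non-ACID read capped at the batch's data columns:
    TypeDescription schema =
        OrcInputFormat.getDesiredRowTypeDescr(conf, false, dataColumns);
    if (schema == null) {
      // schema evolution properties are not set; fall back to the file schema
    }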
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
index 1fce282..0dd58b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
@@ -447,7 +447,8 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
     this.length = options.getLength();
     this.validTxnList = validTxnList;
 
-    TypeDescription typeDescr = OrcInputFormat.getDesiredRowTypeDescr(conf, /* isAcidRead */ true);
+    TypeDescription typeDescr =
+        OrcInputFormat.getDesiredRowTypeDescr(conf, true, Integer.MAX_VALUE);
 
     objectInspector = OrcRecordUpdater.createEventSchema
         (OrcStruct.createObjectInspector(0, OrcUtils.getOrcTypes(typeDescr)));

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index a031a92..0bcf9e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -26,6 +26,8 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.orc.OrcUtils;
+import org.apache.orc.TypeDescription;
 import org.apache.orc.impl.BufferChunk;
 import org.apache.orc.ColumnStatistics;
 import org.apache.orc.impl.ColumnStatisticsImpl;
@@ -71,6 +73,7 @@ public class ReaderImpl implements Reader {
   private final List<OrcProto.StripeStatistics> stripeStats;
   private final int metadataSize;
   protected final List<OrcProto.Type> types;
+  private final TypeDescription schema;
   private final List<OrcProto.UserMetadataItem> userMetadata;
   private final List<OrcProto.ColumnStatistics> fileStats;
   private final List<StripeInformation> stripes;
@@ -243,6 +246,11 @@ public class ReaderImpl implements Reader {
     return result;
   }
 
+  @Override
+  public TypeDescription getSchema() {
+    return schema;
+  }
+
   /**
    * Ensure this is an ORC file to prevent users from trying to read text
    * files or RC files as ORC files.
@@ -386,7 +394,9 @@ public class ReaderImpl implements Reader {
       this.writerVersion = footerMetaData.writerVersion;
       this.stripes = convertProtoStripesToStripes(rInfo.footer.getStripesList());
     }
+    this.schema = OrcUtils.convertTypeFromProtobuf(this.types, 0);
   }
+
   /**
    * Get the WriterVersion based on the ORC file postscript.
    * @param writerVersion the integer writer version
@@ -668,7 +678,7 @@ public class ReaderImpl implements Reader {
       options.include(include);
     }
     return new RecordReaderImpl(this.getStripes(), fileSystem, path,
-        options, types, codec, bufferSize, rowIndexStride, conf);
+        options, schema, types, codec, bufferSize, rowIndexStride, conf);
   }
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index 3975409..c214658 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -27,9 +27,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.orc.BooleanColumnStatistics;
-import org.apache.orc.OrcUtils;
+import org.apache.orc.TypeDescription;
 import org.apache.orc.impl.BufferChunk;
 import org.apache.orc.ColumnStatistics;
 import org.apache.orc.impl.ColumnStatisticsImpl;
@@ -58,7 +57,6 @@ import org.apache.hadoop.hive.common.io.DiskRange;
 import org.apache.hadoop.hive.common.io.DiskRangeList;
 import org.apache.hadoop.hive.common.io.DiskRangeList.CreateHelper;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
-import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.orc.BloomFilterIO;
 import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
@@ -98,7 +96,6 @@ public class RecordReaderImpl implements RecordReader {
   private final SargApplier sargApp;
   // an array about which row groups aren't skipped
   private boolean[] includedRowGroups = null;
-  private final Configuration conf;
   private final MetadataReader metadata;
   private final DataReader dataReader;
 
@@ -145,33 +142,33 @@ public class RecordReaderImpl implements RecordReader {
                              FileSystem fileSystem,
                              Path path,
                              Reader.Options options,
+                             TypeDescription fileSchema,
                              List<OrcProto.Type> types,
                              CompressionCodec codec,
                              int bufferSize,
                              long strideRate,
                              Configuration conf
                              ) throws IOException {
-
-    TreeReaderFactory.TreeReaderSchema treeReaderSchema;
+    SchemaEvolution treeReaderSchema;
+    this.included = options.getInclude();
+    included[0] = true;
     if (options.getSchema() == null) {
       if (LOG.isInfoEnabled()) {
         LOG.info("Schema on read not provided -- using file schema " + types.toString());
       }
-      treeReaderSchema = new TreeReaderFactory.TreeReaderSchema().fileTypes(types).schemaTypes(types);
+      treeReaderSchema = new SchemaEvolution(fileSchema, included);
     } else {
 
       // Now that we are creating a record reader for a file, validate that the schema to read
       // is compatible with the file schema.
       //
-      List<OrcProto.Type> schemaTypes = OrcUtils.getOrcTypes(options.getSchema());
-      treeReaderSchema = SchemaEvolution.validateAndCreate(types, schemaTypes);
+      treeReaderSchema = new SchemaEvolution(fileSchema, options.getSchema(),
+          included);
     }
     this.path = path;
     this.codec = codec;
     this.types = types;
     this.bufferSize = bufferSize;
-    this.included = options.getInclude();
-    this.conf = conf;
     this.rowIndexStride = strideRate;
     this.metadata = new MetadataReaderImpl(fileSystem, path, codec, bufferSize, types.size());
     SearchArgument sarg = options.getSearchArgument();
@@ -210,7 +207,8 @@ public class RecordReaderImpl implements RecordReader {
       skipCorrupt = OrcConf.SKIP_CORRUPT_DATA.getBoolean(conf);
     }
 
-    reader = TreeReaderFactory.createTreeReader(0, treeReaderSchema, included, skipCorrupt);
+    reader = TreeReaderFactory.createTreeReader(treeReaderSchema.getReaderSchema(),
+        treeReaderSchema, included, skipCorrupt);
     indexes = new OrcProto.RowIndex[types.size()];
     bloomFilterIndices = new OrcProto.BloomFilterIndex[types.size()];
     advanceToNextRow(reader, 0L, true);
@@ -239,7 +237,7 @@ public class RecordReaderImpl implements RecordReader {
     return metadata.readStripeFooter(stripe);
   }
 
-  static enum Location {
+  enum Location {
     BEFORE, MIN, MIDDLE, MAX, AFTER
   }
 
@@ -1052,31 +1050,27 @@ public class RecordReaderImpl implements RecordReader {
   }
 
   @Override
-  public VectorizedRowBatch nextBatch(VectorizedRowBatch previous) throws IOException {
+  public boolean nextBatch(VectorizedRowBatch batch) throws IOException {
     try {
-      final VectorizedRowBatch result;
       if (rowInStripe >= rowCountInStripe) {
         currentStripe += 1;
+        if (currentStripe >= stripes.size()) {
+          batch.size = 0;
+          return false;
+        }
         readStripe();
       }
 
-      final int batchSize = computeBatchSize(VectorizedRowBatch.DEFAULT_SIZE);
+      int batchSize = computeBatchSize(batch.getMaxSize());
 
       rowInStripe += batchSize;
-      if (previous == null) {
-        ColumnVector[] cols = (ColumnVector[]) reader.nextVector(null, (int) batchSize);
-        result = new VectorizedRowBatch(cols.length);
-        result.cols = cols;
-      } else {
-        result = previous;
-        result.selectedInUse = false;
-        reader.setVectorColumnCount(result.getDataColumnCount());
-        reader.nextVector(result.cols, batchSize);
-      }
+      reader.setVectorColumnCount(batch.getDataColumnCount());
+      reader.nextBatch(batch, batchSize);
 
-      result.size = batchSize;
+      batch.size = (int) batchSize;
+      batch.selectedInUse = false;
       advanceToNextRow(reader, rowInStripe + rowBaseInStripe, true);
-      return result;
+      return batch.size  != 0;
     } catch (IOException e) {
       // Rethrow exception with file name in log message
       throw new IOException("Error reading file: " + path, e);
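
A rough usage sketch (not part of this patch) of the changed contract: nextBatch now fills a
caller-owned batch and returns false at end of file instead of allocating and returning a new
VectorizedRowBatch. The reader construction below is illustrative only and assumes the
getSchema() accessor added in this commit plus the standard TypeDescription.createRowBatch().

    Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
    VectorizedRowBatch batch = reader.getSchema().createRowBatch();
    RecordReader rows = reader.rows();
    while (rows.nextBatch(batch)) {
      // batch.size rows are populated in batch.cols for this iteration
    }
    rows.close();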

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SchemaEvolution.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SchemaEvolution.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SchemaEvolution.java
index f28ca13..6747691 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SchemaEvolution.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SchemaEvolution.java
@@ -20,13 +20,12 @@ package org.apache.hadoop.hive.ql.io.orc;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.ql.io.orc.TreeReaderFactory.TreeReaderSchema;
-import org.apache.orc.OrcProto;
-import org.apache.orc.OrcUtils;
 import org.apache.orc.TypeDescription;
 
 /**
@@ -34,103 +33,134 @@ import org.apache.orc.TypeDescription;
  * has been schema evolution.
  */
 public class SchemaEvolution {
-
+  private final Map<TypeDescription, TypeDescription> readerToFile;
+  private final boolean[] included;
+  private final TypeDescription readerSchema;
   private static final Log LOG = LogFactory.getLog(SchemaEvolution.class);
 
-  public static TreeReaderSchema validateAndCreate(List<OrcProto.Type> fileTypes,
-      List<OrcProto.Type> schemaTypes) throws IOException {
+  public SchemaEvolution(TypeDescription readerSchema, boolean[] included) {
+    this.included = included;
+    readerToFile = null;
+    this.readerSchema = readerSchema;
+  }
 
-    // For ACID, the row is the ROW field in the outer STRUCT.
-    final boolean isAcid = checkAcidSchema(fileTypes);
-    final List<OrcProto.Type> rowSchema;
-    int rowSubtype;
-    if (isAcid) {
-      rowSubtype = OrcRecordUpdater.ROW + 1;
-      rowSchema = fileTypes.subList(rowSubtype, fileTypes.size());
+  public SchemaEvolution(TypeDescription fileSchema,
+                         TypeDescription readerSchema,
+                         boolean[] included) throws IOException {
+    readerToFile = new HashMap<>(readerSchema.getMaximumId() + 1);
+    this.included = included;
+    if (checkAcidSchema(fileSchema)) {
+      this.readerSchema = createEventSchema(readerSchema);
     } else {
-      rowSubtype = 0;
-      rowSchema = fileTypes;
+      this.readerSchema = readerSchema;
     }
+    buildMapping(fileSchema, this.readerSchema);
+  }
 
-    // Do checking on the overlap.  Additional columns will be defaulted to NULL.
-
-    int numFileColumns = rowSchema.get(0).getSubtypesCount();
-    int numDesiredColumns = schemaTypes.get(0).getSubtypesCount();
-
-    int numReadColumns = Math.min(numFileColumns, numDesiredColumns);
-
-    /**
-     * Check type promotion.
-     *
-     * Currently, we only support integer type promotions that can be done "implicitly".
-     * That is, we know that using a bigger integer tree reader on the original smaller integer
-     * column will "just work".
-     *
-     * In the future, other type promotions might require type conversion.
-     */
-    // short -> int -> bigint as same integer readers are used for the above types.
-
-    for (int i = 0; i < numReadColumns; i++) {
-      OrcProto.Type fColType = fileTypes.get(rowSubtype + i);
-      OrcProto.Type rColType = schemaTypes.get(i);
-      if (!fColType.getKind().equals(rColType.getKind())) {
-
-        boolean ok = false;
-        if (fColType.getKind().equals(OrcProto.Type.Kind.SHORT)) {
+  public TypeDescription getReaderSchema() {
+    return readerSchema;
+  }
 
-          if (rColType.getKind().equals(OrcProto.Type.Kind.INT) ||
-              rColType.getKind().equals(OrcProto.Type.Kind.LONG)) {
-            // type promotion possible, converting SHORT to INT/LONG requested type
-            ok = true;
-          }
-        } else if (fColType.getKind().equals(OrcProto.Type.Kind.INT)) {
+  public TypeDescription getFileType(TypeDescription readerType) {
+    TypeDescription result;
+    if (readerToFile == null) {
+      if (included == null || included[readerType.getId()]) {
+        result = readerType;
+      } else {
+        result = null;
+      }
+    } else {
+      result = readerToFile.get(readerType);
+    }
+    return result;
+  }
 
-          if (rColType.getKind().equals(OrcProto.Type.Kind.LONG)) {
-            // type promotion possible, converting INT to LONG requested type
-            ok = true;
+  void buildMapping(TypeDescription fileType,
+                    TypeDescription readerType) throws IOException {
+    // if the column isn't included, don't map it
+    if (included != null && !included[readerType.getId()]) {
+      return;
+    }
+    boolean isOk = true;
+    // check the easy case first
+    if (fileType.getCategory() == readerType.getCategory()) {
+      switch (readerType.getCategory()) {
+        case BOOLEAN:
+        case BYTE:
+        case SHORT:
+        case INT:
+        case LONG:
+        case DOUBLE:
+        case FLOAT:
+        case STRING:
+        case TIMESTAMP:
+        case BINARY:
+        case DATE:
+          // these are always a match
+          break;
+        case CHAR:
+        case VARCHAR:
+          isOk = fileType.getMaxLength() == readerType.getMaxLength();
+          break;
+        case DECIMAL:
+          // TODO we don't enforce scale and precision checks, but probably should
+          break;
+        case UNION:
+        case MAP:
+        case LIST: {
+          // these must be an exact match
+          List<TypeDescription> fileChildren = fileType.getChildren();
+          List<TypeDescription> readerChildren = readerType.getChildren();
+          if (fileChildren.size() == readerChildren.size()) {
+            for(int i=0; i < fileChildren.size(); ++i) {
+              buildMapping(fileChildren.get(i), readerChildren.get(i));
+            }
+          } else {
+            isOk = false;
           }
+          break;
         }
-
-        if (!ok) {
-          throw new IOException("ORC does not support type conversion from " +
-              fColType.getKind().name() + " to " + rColType.getKind().name());
+        case STRUCT: {
+          // allow either side to have fewer fields than the other
+          List<TypeDescription> fileChildren = fileType.getChildren();
+          List<TypeDescription> readerChildren = readerType.getChildren();
+          int jointSize = Math.min(fileChildren.size(), readerChildren.size());
+          for(int i=0; i < jointSize; ++i) {
+            buildMapping(fileChildren.get(i), readerChildren.get(i));
+          }
+          break;
         }
+        default:
+          throw new IllegalArgumentException("Unknown type " + readerType);
       }
-    }
-
-    List<OrcProto.Type> fullSchemaTypes;
-
-    if (isAcid) {
-      fullSchemaTypes = new ArrayList<OrcProto.Type>();
-
-      // This copies the ACID struct type which is subtype = 0.
-      // It has field names "operation" through "row".
-      // And we copy the types for all fields EXCEPT ROW (which must be last!).
-
-      for (int i = 0; i < rowSubtype; i++) {
-        fullSchemaTypes.add(fileTypes.get(i).toBuilder().build());
+    } else {
+      switch (fileType.getCategory()) {
+        case SHORT:
+          if (readerType.getCategory() != TypeDescription.Category.INT &&
+              readerType.getCategory() != TypeDescription.Category.LONG) {
+            isOk = false;
+          }
+          break;
+        case INT:
+          if (readerType.getCategory() != TypeDescription.Category.LONG) {
+            isOk = false;
+          }
+          break;
+        default:
+          isOk = false;
       }
-
-      // Add the row struct type.
-      OrcUtils.appendOrcTypesRebuildSubtypes(fullSchemaTypes, schemaTypes, 0);
+    }
+    if (isOk) {
+      readerToFile.put(readerType, fileType);
     } else {
-      fullSchemaTypes = schemaTypes;
+      throw new IOException("ORC does not support type conversion from " +
+          fileType + " to " + readerType);
     }
-
-    int innerStructSubtype = rowSubtype;
-
-    // LOG.info("Schema evolution: (fileTypes) " + fileTypes.toString() +
-    //     " (schemaEvolutionTypes) " + schemaEvolutionTypes.toString());
-
-    return new TreeReaderSchema().
-        fileTypes(fileTypes).
-        schemaTypes(fullSchemaTypes).
-        innerStructSubtype(innerStructSubtype);
   }
 
-  private static boolean checkAcidSchema(List<OrcProto.Type> fileSchema) {
-    if (fileSchema.get(0).getKind().equals(OrcProto.Type.Kind.STRUCT)) {
-      List<String> rootFields = fileSchema.get(0).getFieldNamesList();
+  private static boolean checkAcidSchema(TypeDescription type) {
+    if (type.getCategory().equals(TypeDescription.Category.STRUCT)) {
+      List<String> rootFields = type.getFieldNames();
       if (acidEventFieldNames.equals(rootFields)) {
         return true;
       }
@@ -142,26 +172,14 @@ public class SchemaEvolution {
    * @param typeDescr
    * @return ORC types for the ACID event based on the row's type description
    */
-  public static List<OrcProto.Type> createEventSchema(TypeDescription typeDescr) {
-
-    List<OrcProto.Type> result = new ArrayList<OrcProto.Type>();
-
-    OrcProto.Type.Builder type = OrcProto.Type.newBuilder();
-    type.setKind(OrcProto.Type.Kind.STRUCT);
-    type.addAllFieldNames(acidEventFieldNames);
-    for (int i = 0; i < acidEventFieldNames.size(); i++) {
-      type.addSubtypes(i + 1);
-    }
-    result.add(type.build());
-
-    // Automatically add all fields except the last (ROW).
-    for (int i = 0; i < acidEventOrcTypeKinds.size() - 1; i ++) {
-      type.clear();
-      type.setKind(acidEventOrcTypeKinds.get(i));
-      result.add(type.build());
-    }
-
-    OrcUtils.appendOrcTypesRebuildSubtypes(result, typeDescr);
+  public static TypeDescription createEventSchema(TypeDescription typeDescr) {
+    TypeDescription result = TypeDescription.createStruct()
+        .addField("operation", TypeDescription.createInt())
+        .addField("originalTransaction", TypeDescription.createLong())
+        .addField("bucket", TypeDescription.createInt())
+        .addField("rowId", TypeDescription.createLong())
+        .addField("currentTransaction", TypeDescription.createLong())
+        .addField("row", typeDescr.clone());
     return result;
   }
 
@@ -174,14 +192,4 @@ public class SchemaEvolution {
     acidEventFieldNames.add("currentTransaction");
     acidEventFieldNames.add("row");
   }
-  public static final List<OrcProto.Type.Kind> acidEventOrcTypeKinds =
-      new ArrayList<OrcProto.Type.Kind>();
-  static {
-    acidEventOrcTypeKinds.add(OrcProto.Type.Kind.INT);
-    acidEventOrcTypeKinds.add(OrcProto.Type.Kind.LONG);
-    acidEventOrcTypeKinds.add(OrcProto.Type.Kind.INT);
-    acidEventOrcTypeKinds.add(OrcProto.Type.Kind.LONG);
-    acidEventOrcTypeKinds.add(OrcProto.Type.Kind.LONG);
-    acidEventOrcTypeKinds.add(OrcProto.Type.Kind.STRUCT);
-  }
 }
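
For illustration, a minimal sketch (not part of this patch) of how the new mapping-based
SchemaEvolution resolves an integer promotion. The column names and schemas below are made up;
only the SchemaEvolution and TypeDescription calls shown in the diff above are assumed.

    // file was written with a SHORT column; the reader asks for LONG (an allowed promotion)
    TypeDescription fileSchema = TypeDescription.createStruct()
        .addField("id", TypeDescription.createShort())
        .addField("name", TypeDescription.createString());
    TypeDescription readerSchema = TypeDescription.createStruct()
        .addField("id", TypeDescription.createLong())
        .addField("name", TypeDescription.createString());
    // the constructor throws IOException if any column cannot be converted
    SchemaEvolution evolution = new SchemaEvolution(fileSchema, readerSchema, null);
    // the reader's LONG "id" column maps back to the file's SHORT "id" column
    TypeDescription readerId = evolution.getReaderSchema().getChildren().get(0);
    TypeDescription fileId = evolution.getFileType(readerId);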


[28/58] [abbrv] hive git commit: HIVE-13491 : Testing : log thread stacks when metastore fails to start (Thejas Nair, reviewed by Szehon Ho)

Posted by jd...@apache.org.
HIVE-13491 : Testing : log thread stacks when metastore fails to start (Thejas Nair, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e16bcca6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e16bcca6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e16bcca6

Branch: refs/heads/llap
Commit: e16bcca649bb55375860917726fab7545717d5c2
Parents: 547b37d
Author: Thejas Nair <th...@hortonworks.com>
Authored: Tue Apr 12 12:09:47 2016 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Tue Apr 12 12:10:11 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hive/metastore/MetaStoreUtils.java   | 28 ++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
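The patch below replaces the 6 x 10s connection-retry loop with 60 x 1s and, on final failure,
logs the stacks of every live thread. A minimal standalone sketch of that thread-dump idea
(independent of MetaStoreUtils; the class name is made up):

    import java.util.Map;

    public class ThreadDumpSketch {
      public static void main(String[] args) {
        // Thread.getAllStackTraces() returns a snapshot of every live thread's stack
        for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
          System.err.println("Thread " + e.getKey().getName() + " state=" + e.getKey().getState());
          for (StackTraceElement frame : e.getValue()) {
            System.err.println("    at " + frame);
          }
        }
      }
    }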


http://git-wip-us.apache.org/repos/asf/hive/blob/e16bcca6/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 866e1c3..76220f4 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -1209,16 +1209,40 @@ public class MetaStoreUtils {
         socket.close();
         return;
       } catch (Exception e) {
-        if (retries++ > 6) { //give up
+        if (retries++ > 60) { //give up
           exc = e;
           break;
         }
-        Thread.sleep(10000);
+        Thread.sleep(1000);
       }
     }
+    // something is preventing metastore from starting
+    // print the stack from all threads for debugging purposes
+    LOG.error("Unable to connect to metastore server: " + exc.getMessage());
+    LOG.info("Printing all thread stack traces for debugging before throwing exception.");
+    LOG.info(getAllThreadStacksAsString());
     throw exc;
   }
 
+  private static String getAllThreadStacksAsString() {
+    Map<Thread, StackTraceElement[]> threadStacks = Thread.getAllStackTraces();
+    StringBuilder sb = new StringBuilder();
+    for (Map.Entry<Thread, StackTraceElement[]> entry : threadStacks.entrySet()) {
+      Thread t = entry.getKey();
+      sb.append(System.lineSeparator());
+      sb.append("Name: ").append(t.getName()).append(" State: " + t.getState());
+      addStackString(entry.getValue(), sb);
+    }
+    return sb.toString();
+  }
+
+  private static void addStackString(StackTraceElement[] stackElems, StringBuilder sb) {
+    sb.append(System.lineSeparator());
+    for (StackTraceElement stackElem : stackElems) {
+      sb.append(stackElem).append(System.lineSeparator());
+    }
+  }
+
   /**
    * Finds a free port on the machine.
    *


[20/58] [abbrv] hive git commit: HIVE-13413. Add a llapstatus command line tool. (Siddharth Seth, reviewed by Prasanth Jayachandran)

Posted by jd...@apache.org.
HIVE-13413. Add a llapstatus command line tool. (Siddharth Seth, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/af4be3de
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/af4be3de
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/af4be3de

Branch: refs/heads/llap
Commit: af4be3de160c44966f706698c1b9aabbb6f4f9e0
Parents: 37e6e1b
Author: Siddharth Seth <ss...@apache.org>
Authored: Mon Apr 11 13:21:34 2016 -0700
Committer: Siddharth Seth <ss...@apache.org>
Committed: Mon Apr 11 13:21:34 2016 -0700

----------------------------------------------------------------------
 bin/ext/llapstatus.sh                           |  42 +
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   3 +
 .../hive/llap/registry/ServiceInstance.java     |   6 +
 .../registry/impl/LlapFixedRegistryImpl.java    |  24 +
 .../impl/LlapZookeeperRegistryImpl.java         |  16 +-
 llap-server/pom.xml                             |  55 ++
 .../hive/llap/cli/LlapOptionsProcessor.java     |   2 +-
 .../llap/cli/LlapStatusOptionsProcessor.java    | 139 ++++
 .../hive/llap/cli/LlapStatusServiceDriver.java  | 821 +++++++++++++++++++
 .../hive/llap/daemon/impl/LlapDaemon.java       |   9 +
 .../main/resources/llap-cli-log4j2.properties   |   9 +-
 pom.xml                                         |   1 +
 12 files changed, 1119 insertions(+), 8 deletions(-)
----------------------------------------------------------------------
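A rough usage sketch of the new tool: llapstatus is registered as a Hive service by
bin/ext/llapstatus.sh below and gathers cluster status from YARN, Slider, and the LLAP registry.
The cluster name "llap0" is made up, and the standard hive --service dispatch is assumed.

    # hypothetical invocations
    hive --service llapstatus --name llap0
    hive --service llapstatus -H          # print usage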


http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/bin/ext/llapstatus.sh
----------------------------------------------------------------------
diff --git a/bin/ext/llapstatus.sh b/bin/ext/llapstatus.sh
new file mode 100644
index 0000000..96edda2
--- /dev/null
+++ b/bin/ext/llapstatus.sh
@@ -0,0 +1,42 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+THISSERVICE=llapstatus
+export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
+
+llapstatus () {
+  CLASS=org.apache.hadoop.hive.llap.cli.LlapStatusServiceDriver;
+  if [ ! -f ${HIVE_LIB}/hive-cli-*.jar ]; then
+    echo "Missing Hive CLI Jar"
+    exit 3;
+  fi
+
+  if $cygwin; then
+    HIVE_LIB=`cygpath -w "$HIVE_LIB"`
+  fi
+
+  set -e;
+
+  export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configurationFile=llap-cli-log4j2.properties "
+  # hadoop 20 or newer - skip the aux_jars option. picked up from hiveconf
+  $HADOOP $CLASS $HIVE_OPTS "$@"
+  
+}
+
+llapstatus_help () {
+  CLASS=org.apache.hadoop.hive.llap.cli.LlapStatusServiceDriver;
+  execHiveCmd $CLASS "--help"
+} 
+

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 95c5c0e..fabb8ab 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -330,6 +330,7 @@ public class HiveConf extends Configuration {
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION.varname);
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_PORT.varname);
     llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_SSL.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
   }
 
   /**
@@ -2640,6 +2641,8 @@ public class HiveConf extends Configuration {
     LLAP_DAEMON_QUEUE_NAME("hive.llap.daemon.queue.name", null,
         "Queue name within which the llap slider application will run." +
         " Used in LlapServiceDriver and package.py"),
+    LLAP_DAEMON_CONTAINER_ID("hive.llap.daemon.container.id", null,
+        "ContainerId of a running LlapDaemon. Used to publish to the registry"),
     LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED("hive.llap.daemon.shuffle.dir.watcher.enabled", false,
       "TODO doc", "llap.daemon.shuffle.dir-watcher.enabled"),
     LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS(

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
----------------------------------------------------------------------
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
index 2bd860a..7e37e96 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
@@ -53,6 +53,12 @@ public interface ServiceInstance {
    */
   public int getShufflePort();
 
+
+  /**
+   * Address for services hosted on http
+   * @return
+   */
+  public String getServicesAddress();
   /**
    * Return the last known state (without refreshing)
    * 

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
----------------------------------------------------------------------
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
index 3f667d0..bd814b9 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
@@ -16,6 +16,8 @@ package org.apache.hadoop.hive.llap.registry.impl;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URL;
 import java.net.UnknownHostException;
 import java.util.Collections;
 import java.util.Comparator;
@@ -52,6 +54,8 @@ public class LlapFixedRegistryImpl implements ServiceRegistry {
   private final int port;
   private final int shuffle;
   private final int mngPort;
+  private final int webPort;
+  private final String webScheme;
   private final String[] hosts;
   private final int memory;
   private final int vcores;
@@ -66,6 +70,11 @@ public class LlapFixedRegistryImpl implements ServiceRegistry {
     this.resolveHosts = conf.getBoolean(FIXED_REGISTRY_RESOLVE_HOST_NAMES, true);
     this.mngPort = HiveConf.getIntVar(conf, ConfVars.LLAP_MANAGEMENT_RPC_PORT);
 
+
+    this.webPort = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_WEB_PORT);
+    boolean isSsl = HiveConf.getBoolVar(conf, ConfVars.LLAP_DAEMON_WEB_SSL);
+    this.webScheme = isSsl ? "https" : "http";
+
     for (Map.Entry<String, String> kv : conf) {
       if (kv.getKey().startsWith(HiveConf.PREFIX_LLAP)
           || kv.getKey().startsWith(HiveConf.PREFIX_HIVE_LLAP)) {
@@ -107,6 +116,7 @@ public class LlapFixedRegistryImpl implements ServiceRegistry {
   private final class FixedServiceInstance implements ServiceInstance {
 
     private final String host;
+    private final String serviceAddress;
 
     public FixedServiceInstance(String host) {
       if (resolveHosts) {
@@ -124,6 +134,15 @@ public class LlapFixedRegistryImpl implements ServiceRegistry {
         }
       }
       this.host = host;
+      final URL serviceURL;
+      try {
+        serviceURL =
+            new URL(LlapFixedRegistryImpl.this.webScheme, host, LlapFixedRegistryImpl.this.webPort,
+                "");
+        this.serviceAddress = serviceURL.toString();
+      } catch (MalformedURLException e) {
+        throw new RuntimeException(e);
+      }
     }
 
     public String getWorkerIdentity() {
@@ -152,6 +171,11 @@ public class LlapFixedRegistryImpl implements ServiceRegistry {
     }
 
     @Override
+    public String getServicesAddress() {
+      return serviceAddress;
+    }
+
+    @Override
     public boolean isAlive() {
       return true;
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
----------------------------------------------------------------------
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
index 3538bb2..275cbc2 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
@@ -310,13 +310,19 @@ public class LlapZookeeperRegistryImpl implements ServiceRegistry {
     private final int rpcPort;
     private final int mngPort;
     private final int shufflePort;
+    private final String serviceAddress;
 
     public DynamicServiceInstance(ServiceRecord srv) throws IOException {
       this.srv = srv;
 
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Working with ServiceRecord: {}", srv);
+      }
+
       final Endpoint shuffle = srv.getInternalEndpoint(IPC_SHUFFLE);
       final Endpoint rpc = srv.getInternalEndpoint(IPC_LLAP);
       final Endpoint mng = srv.getInternalEndpoint(IPC_MNG);
+      final Endpoint services = srv.getExternalEndpoint(IPC_SERVICES);
 
       this.host =
           RegistryTypeUtils.getAddressField(rpc.addresses.get(0),
@@ -330,6 +336,8 @@ public class LlapZookeeperRegistryImpl implements ServiceRegistry {
       this.shufflePort =
           Integer.valueOf(RegistryTypeUtils.getAddressField(shuffle.addresses.get(0),
               AddressTypes.ADDRESS_PORT_FIELD));
+      this.serviceAddress =
+          RegistryTypeUtils.getAddressField(services.addresses.get(0), AddressTypes.ADDRESS_URI);
     }
 
     @Override
@@ -353,6 +361,11 @@ public class LlapZookeeperRegistryImpl implements ServiceRegistry {
     }
 
     @Override
+    public String getServicesAddress() {
+      return serviceAddress;
+    }
+
+    @Override
     public boolean isAlive() {
       return alive;
     }
@@ -378,7 +391,8 @@ public class LlapZookeeperRegistryImpl implements ServiceRegistry {
     @Override
     public String toString() {
       return "DynamicServiceInstance [alive=" + alive + ", host=" + host + ":" + rpcPort +
-          " with resources=" + getResource() + "]";
+          " with resources=" + getResource() + ", shufflePort=" + getShufflePort() +
+          ", servicesAddress=" + getServicesAddress() +  ", mgmtPort=" + getManagementPort() + "]";
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-server/pom.xml
----------------------------------------------------------------------
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index c81bdb2..9de3443 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -159,6 +159,61 @@
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>org.apache.slider</groupId>
+      <artifactId>slider-core</artifactId>
+      <version>${slider.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-server-web-proxy</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-digester</groupId>
+          <artifactId>commons-digester</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.codahale.metrics</groupId>
+          <artifactId>metrics-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.codahale.metrics</groupId>
+          <artifactId>metrics-servlets</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.inject</groupId>
+          <artifactId>guice</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey.contribs</groupId>
+          <artifactId>jersey-guice</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty-sslengine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jettison</groupId>
+          <artifactId>jettison</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>asm</groupId>
+          <artifactId>asm</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
 
     <!-- test inter-project -->
     <dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
index dd908fc..f628ddf 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
@@ -64,7 +64,7 @@ public class LlapOptionsProcessor {
   public static final String OPTION_OUTPUT_DIR = "output";
 
 
-  public class LlapOptions {
+  public static class LlapOptions {
     private final int instances;
     private final String directory;
     private final String name;

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java
new file mode 100644
index 0000000..e3a100c
--- /dev/null
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusOptionsProcessor.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.cli;
+
+import jline.TerminalFactory;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LlapStatusOptionsProcessor {
+
+  private static final Logger LOG = LoggerFactory.getLogger(LlapStatusOptionsProcessor.class);
+
+  private static final String LLAPSTATUS_CONSTANT = "llapstatus";
+
+  enum OptionConstants {
+
+    NAME("name", 'n', "LLAP cluster name"),
+    HIVECONF("hiveconf", null, "Use value for given property. Overridden by explicit parameters", "property=value", 2),
+    HELP("help", 'H', "Print help information"),;
+
+
+    private final String longOpt;
+    private final Character shortOpt;
+    private final String description;
+    private final String argName;
+    private final int numArgs;
+
+    OptionConstants(String longOpt, char shortOpt, String description) {
+      this(longOpt, shortOpt, description, longOpt, 1);
+
+    }
+
+    OptionConstants(String longOpt, Character shortOpt, String description, String argName, int numArgs) {
+      this.longOpt = longOpt;
+      this.shortOpt = shortOpt;
+      this.description = description;
+      this.argName = argName;
+      this.numArgs = numArgs;
+    }
+
+    public String getLongOpt() {
+      return longOpt;
+    }
+
+    public Character getShortOpt() {
+      return shortOpt;
+    }
+
+    public String getDescription() {
+      return description;
+    }
+
+    public String getArgName() {
+      return argName;
+    }
+
+    public int getNumArgs() {
+      return numArgs;
+    }
+  }
+
+
+  public static class LlapStatusOptions {
+    private final String name;
+
+    LlapStatusOptions(String name) {
+      this.name = name;
+    }
+
+    public String getName() {
+      return name;
+    }
+  }
+
+  private final Options options = new Options();
+  private org.apache.commons.cli.CommandLine commandLine;
+
+  public LlapStatusOptionsProcessor() {
+
+    for (OptionConstants optionConstant : OptionConstants.values()) {
+
+      OptionBuilder optionBuilder = OptionBuilder.hasArgs(optionConstant.getNumArgs())
+          .withArgName(optionConstant.getArgName()).withLongOpt(optionConstant.getLongOpt())
+          .withDescription(optionConstant.getDescription());
+      if (optionConstant.getShortOpt() == null) {
+        options.addOption(optionBuilder.create());
+      } else {
+        options.addOption(optionBuilder.create(optionConstant.getShortOpt()));
+      }
+    }
+  }
+
+  public LlapStatusOptions processOptions(String[] args) throws ParseException {
+    commandLine = new GnuParser().parse(options, args);
+    if (commandLine.hasOption(OptionConstants.HELP.getShortOpt()) ||
+        false == commandLine.hasOption(OptionConstants.NAME.getLongOpt())) {
+      printUsage();
+      return null;
+    }
+
+    String name = commandLine.getOptionValue(OptionConstants.NAME.getLongOpt());
+    return new LlapStatusOptions(name);
+  }
+
+
+  private void printUsage() {
+    HelpFormatter hf = new HelpFormatter();
+    try {
+      int width = hf.getWidth();
+      int jlineWidth = TerminalFactory.get().getWidth();
+      width = Math.min(160, Math.max(jlineWidth, width)); // Ignore potentially incorrect values
+      hf.setWidth(width);
+    } catch (Throwable t) { // Ignore
+    }
+    hf.printHelp(LLAPSTATUS_CONSTANT, options);
+  }
+
+}
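
A small sketch (not part of the patch) of driving the options processor above; the argument
value "llap0" is made up, and processOptions throws ParseException on bad input.

    LlapStatusOptionsProcessor processor = new LlapStatusOptionsProcessor();
    LlapStatusOptionsProcessor.LlapStatusOptions opts =
        processor.processOptions(new String[] {"--name", "llap0"});
    if (opts != null) {  // null means --help was requested or --name was missing
      System.out.println("LLAP cluster name: " + opts.getName());
    }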

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java
new file mode 100644
index 0000000..d1193ad
--- /dev/null
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapStatusServiceDriver.java
@@ -0,0 +1,821 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.llap.cli;
+
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.llap.cli.LlapStatusOptionsProcessor.LlapStatusOptions;
+import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
+import org.apache.hadoop.hive.llap.registry.ServiceInstance;
+import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.util.Clock;
+import org.apache.hadoop.yarn.util.SystemClock;
+import org.apache.slider.api.ClusterDescription;
+import org.apache.slider.api.ClusterDescriptionKeys;
+import org.apache.slider.api.StatusKeys;
+import org.apache.slider.client.SliderClient;
+import org.apache.slider.core.exceptions.SliderException;
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LlapStatusServiceDriver {
+
+  private static final Logger LOG = LoggerFactory.getLogger(LlapStatusServiceDriver.class);
+
+  private static final long FIND_YARN_APP_TIMEOUT = 20 * 1000l; // 20 seconds to wait for app to be visible
+
+  private static final String AM_KEY = "slider-appmaster";
+  private static final String LLAP_KEY = "LLAP";
+
+  private final Configuration conf;
+  private final Clock clock = new SystemClock();
+  private final AppStatusBuilder appStatusBuilder = new AppStatusBuilder();
+
+  public LlapStatusServiceDriver() {
+    SessionState ss = SessionState.get();
+    conf = (ss != null) ? ss.getConf() : new HiveConf(SessionState.class);
+  }
+
+
+  private int run(String[] args) {
+
+    SliderClient sliderClient = null;
+    try {
+      LlapStatusOptionsProcessor optionsProcessor = new LlapStatusOptionsProcessor();
+      LlapStatusOptions options;
+      try {
+        options = optionsProcessor.processOptions(args);
+      } catch (Exception e) {
+        LOG.info("Failed to parse arguments", e);
+        return ExitCode.INCORRECT_USAGE.getInt();
+      }
+
+      for (String f : LlapDaemonConfiguration.DAEMON_CONFIGS) {
+        conf.addResource(f);
+      }
+      conf.reloadConfiguration();
+
+
+      try {
+        sliderClient = createSliderClient();
+      } catch (LlapStatusCliException e) {
+        logError(e);
+        return e.getExitCode().getInt();
+      }
+
+      // Get the App report from YARN
+      ApplicationReport appReport = null;
+      try {
+        appReport = getAppReport(options, sliderClient, FIND_YARN_APP_TIMEOUT);
+      } catch (LlapStatusCliException e) {
+        logError(e);
+        return e.getExitCode().getInt();
+      }
+
+      // Process the report to decide whether to go to slider.
+      ExitCode ret;
+      try {
+        ret = processAppReport(appReport, appStatusBuilder);
+      } catch (LlapStatusCliException e) {
+        logError(e);
+        return e.getExitCode().getInt();
+      }
+
+      if (ret != ExitCode.SUCCESS) {
+        return ret.getInt();
+      } else if (EnumSet.of(State.APP_NOT_FOUND, State.COMPLETE, State.LAUNCHING)
+          .contains(appStatusBuilder.getState())) {
+        return ExitCode.SUCCESS.getInt();
+      } else {
+        // Get information from slider.
+        try {
+          ret = populateAppStatusFromSlider(options, sliderClient, appStatusBuilder);
+        } catch (LlapStatusCliException e) {
+          // In case of failure, send back whatever has been constructed so far - which would be from the AppReport
+          logError(e);
+          return e.getExitCode().getInt();
+        }
+      }
+
+      if (ret != ExitCode.SUCCESS) {
+        return ret.getInt();
+      } else {
+        try {
+          ret = populateAppStatusFromLlapRegistry(options, appStatusBuilder);
+        } catch (LlapStatusCliException e) {
+          logError(e);
+          return e.getExitCode().getInt();
+        }
+      }
+      return ret.getInt();
+    } finally {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Final AppState: " + appStatusBuilder.toString());
+      }
+      if (sliderClient != null) {
+        sliderClient.stop();
+      }
+    }
+  }
+
+  private void outputJson() throws LlapStatusCliException {
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.configure(SerializationConfig.Feature.FAIL_ON_EMPTY_BEANS, false);
+    mapper.setSerializationInclusion(JsonSerialize.Inclusion.NON_NULL);
+    mapper.setSerializationInclusion(JsonSerialize.Inclusion.NON_EMPTY);
+    try {
+      System.out
+          .println(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(appStatusBuilder));
+    } catch (IOException e) {
+      throw new LlapStatusCliException(ExitCode.LLAP_JSON_GENERATION_ERROR, "Failed to create JSON",
+          e);
+    }
+  }
+
+  private SliderClient createSliderClient() throws LlapStatusCliException {
+    SliderClient sliderClient;
+    try {
+      sliderClient = new SliderClient() {
+        @Override
+        public void serviceInit(Configuration conf) throws Exception {
+          super.serviceInit(conf);
+          initHadoopBinding();
+        }
+      };
+      Configuration sliderClientConf = new Configuration(conf);
+      sliderClientConf = sliderClient.bindArgs(sliderClientConf,
+          new String[] { "help" });
+      sliderClient.init(sliderClientConf);
+      sliderClient.start();
+      return sliderClient;
+    } catch (Exception e) {
+      throw new LlapStatusCliException(ExitCode.SLIDER_CLIENT_ERROR_CREATE_FAILED,
+          "Failed to create slider client", e);
+    }
+  }
+
+
+  private ApplicationReport getAppReport(LlapStatusOptions options, SliderClient sliderClient,
+                                         long timeoutMs) throws LlapStatusCliException {
+
+    long startTime = clock.getTime();
+    long timeoutTime = startTime + timeoutMs;
+    ApplicationReport appReport = null;
+
+    // TODO HIVE-13454 Maybe add an option to wait for a certain amount of time for the app to
+    // move to running state. Potentially even wait for the containers to be launched.
+    while (clock.getTime() < timeoutTime && appReport == null) {
+      try {
+        appReport = sliderClient.getYarnAppListClient().findInstance(options.getName());
+        if (appReport == null) {
+          long remainingTime = Math.min(timeoutTime - clock.getTime(), 500l);
+          if (remainingTime > 0) {
+            Thread.sleep(remainingTime);
+          } else {
+            break;
+          }
+        }
+      } catch (Exception e) { // No point separating IOException vs YarnException vs others
+        throw new LlapStatusCliException(ExitCode.YARN_ERROR,
+            "Failed to get Yarn AppReport", e);
+      }
+    }
+    return appReport;
+  }
+
+
+  /**
+   * Populates parts of the AppStatus
+   *
+   * @param appReport
+   * @param appStatusBuilder
+   * @return an ExitCode. An ExitCode other than ExitCode.SUCCESS implies future progress not possible
+   * @throws LlapStatusCliException
+   */
+  private ExitCode processAppReport(ApplicationReport appReport,
+                               AppStatusBuilder appStatusBuilder) throws LlapStatusCliException {
+    if (appReport == null) {
+      appStatusBuilder.setState(State.APP_NOT_FOUND);
+      LOG.info("No Application Found");
+      return ExitCode.SUCCESS;
+    }
+
+    appStatusBuilder.setAmInfo(
+        new AmInfo().setAppName(appReport.getName()).setAppType(appReport.getApplicationType()));
+    appStatusBuilder.setAppStartTime(appReport.getStartTime());
+    switch (appReport.getYarnApplicationState()) {
+      case NEW:
+      case NEW_SAVING:
+      case SUBMITTED:
+        appStatusBuilder.setState(State.LAUNCHING);
+        return ExitCode.SUCCESS;
+      case ACCEPTED:
+        appStatusBuilder.maybeCreateAndGetAmInfo().setAppId(appReport.getApplicationId().toString());
+        appStatusBuilder.setState(State.LAUNCHING);
+        return ExitCode.SUCCESS;
+      case RUNNING:
+        appStatusBuilder.maybeCreateAndGetAmInfo().setAppId(appReport.getApplicationId().toString());
+        // If the app state is running, get additional information from Slider itself.
+        return ExitCode.SUCCESS;
+      case FINISHED:
+      case FAILED:
+      case KILLED:
+        appStatusBuilder.maybeCreateAndGetAmInfo().setAppId(appReport.getApplicationId().toString());
+        appStatusBuilder.setAppFinishTime(appReport.getFinishTime());
+        appStatusBuilder.setState(State.COMPLETE);
+        return ExitCode.SUCCESS;
+      default:
+        throw new LlapStatusCliException(ExitCode.INTERNAL_ERROR,
+            "Unknown Yarn Application State: " + appReport.getYarnApplicationState());
+    }
+  }
+
+
+  /**
+   *
+   * @param options
+   * @param sliderClient
+   * @param appStatusBuilder
+   * @return an ExitCode. An ExitCode other than ExitCode.SUCCESS implies future progress not possible
+   * @throws LlapStatusCliException
+   */
+  private ExitCode populateAppStatusFromSlider(LlapStatusOptions options, SliderClient sliderClient, AppStatusBuilder appStatusBuilder) throws
+      LlapStatusCliException {
+
+    ClusterDescription clusterDescription;
+    try {
+      clusterDescription = sliderClient.getClusterDescription(options.getName());
+    } catch (SliderException e) {
+      throw new LlapStatusCliException(ExitCode.SLIDER_CLIENT_ERROR_OTHER,
+          "Failed to get cluster description from slider. SliderErrorCode=" + (e).getExitCode(), e);
+    } catch (Exception e) {
+      throw new LlapStatusCliException(ExitCode.SLIDER_CLIENT_ERROR_OTHER,
+          "Failed to get cluster description from slider", e);
+    }
+
+    if (clusterDescription == null) {
+      LOG.info("Slider ClusterDescription not available");
+      return ExitCode.SLIDER_CLIENT_ERROR_OTHER; // ClusterDescription should always be present.
+    } else {
+      // Process the Cluster Status returned by slider.
+      appStatusBuilder.setOriginalConfigurationPath(clusterDescription.originConfigurationPath);
+      appStatusBuilder.setGeneratedConfigurationPath(clusterDescription.generatedConfigurationPath);
+      appStatusBuilder.setAppStartTime(clusterDescription.createTime);
+
+      // Finish populating AMInfo
+      appStatusBuilder.maybeCreateAndGetAmInfo().setAmWebUrl(clusterDescription.getInfo(StatusKeys.INFO_AM_WEB_URL));
+      appStatusBuilder.maybeCreateAndGetAmInfo().setHostname(clusterDescription.getInfo(StatusKeys.INFO_AM_HOSTNAME));
+      appStatusBuilder.maybeCreateAndGetAmInfo().setContainerId(clusterDescription.getInfo(StatusKeys.INFO_AM_CONTAINER_ID));
+
+
+      if (clusterDescription.statistics != null) {
+        Map<String, Integer> llapStats = clusterDescription.statistics.get(LLAP_KEY);
+        if (llapStats != null) {
+          int desiredContainers = llapStats.get(StatusKeys.STATISTICS_CONTAINERS_DESIRED);
+          int liveContainers = llapStats.get(StatusKeys.STATISTICS_CONTAINERS_LIVE);
+          appStatusBuilder.setDesiredInstances(desiredContainers);
+          appStatusBuilder.setLiveInstances(liveContainers);
+        } else {
+          throw new LlapStatusCliException(ExitCode.SLIDER_CLIENT_ERROR_OTHER,
+              "Failed to get statistics for LLAP"); // Error since LLAP should always exist.
+        }
+        // TODO HIVE-13454 Use some information from here such as containers.start.failed
+        // and containers.failed.recently to provide an estimate of whether this app is healthy or not.
+      } else {
+        throw new LlapStatusCliException(ExitCode.SLIDER_CLIENT_ERROR_OTHER,
+            "Failed to get statistics"); // Error since statistics should always exist.
+      }
+
+      // Code to locate container status via slider. Not using this at the moment.
+      if (clusterDescription.status != null) {
+        Object liveObject = clusterDescription.status.get(ClusterDescriptionKeys.KEY_CLUSTER_LIVE);
+        if (liveObject != null) {
+          Map<String, Map<String, Map<String, Object>>> liveEntity =
+              (Map<String, Map<String, Map<String, Object>>>) liveObject;
+          Map<String, Map<String, Object>> llapEntity = liveEntity.get(LLAP_KEY);
+
+          if (llapEntity != null) { // Not a problem. Nothing has come up yet.
+            for (Map.Entry<String, Map<String, Object>> containerEntry : llapEntity.entrySet()) {
+              String containerIdString = containerEntry.getKey();
+              Map<String, Object> containerParams = containerEntry.getValue();
+
+              String host = (String) containerParams.get("host");
+
+              LlapInstance llapInstance = new LlapInstance(host, containerIdString);
+
+              appStatusBuilder.addNewLlapInstance(llapInstance);
+            }
+          }
+
+        }
+      }
+
+      return ExitCode.SUCCESS;
+
+    }
+  }
+
+
+  /**
+   *
+   * @param options
+   * @param appStatusBuilder
+   * @return an ExitCode. An ExitCode other than ExitCode.SUCCESS implies future progress not possible
+   * @throws LlapStatusCliException
+   */
+  private ExitCode populateAppStatusFromLlapRegistry(LlapStatusOptions options, AppStatusBuilder appStatusBuilder) throws
+      LlapStatusCliException {
+    Configuration llapRegistryConf= new Configuration(conf);
+    llapRegistryConf
+        .set(HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + options.getName());
+    LlapRegistryService llapRegistry;
+    try {
+      llapRegistry = LlapRegistryService.getClient(llapRegistryConf);
+    } catch (Exception e) {
+      throw new LlapStatusCliException(ExitCode.LLAP_REGISTRY_ERROR,
+          "Failed to create llap registry client", e);
+    }
+    try {
+      Map<String, ServiceInstance> serviceInstanceMap;
+      try {
+        serviceInstanceMap = llapRegistry.getInstances().getAll();
+      } catch (IOException e) {
+        throw new LlapStatusCliException(ExitCode.LLAP_REGISTRY_ERROR, "Failed to get instances from llap registry", e);
+      }
+
+      if (serviceInstanceMap == null || serviceInstanceMap.isEmpty()) {
+        LOG.info("No information found in the LLAP registry");
+        appStatusBuilder.setLiveInstances(0);
+        appStatusBuilder.setState(State.LAUNCHING);
+        appStatusBuilder.clearLlapInstances();
+        return ExitCode.SUCCESS;
+      } else {
+
+
+        // Tracks instances known by both slider and llap.
+        List<LlapInstance> validatedInstances = new LinkedList<>();
+        List<String> llapExtraInstances = new LinkedList<>();
+
+        for (Map.Entry<String, ServiceInstance> serviceInstanceEntry : serviceInstanceMap
+            .entrySet()) {
+
+          ServiceInstance serviceInstance = serviceInstanceEntry.getValue();
+          String containerIdString = serviceInstance.getProperties().get(HiveConf.ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
+
+
+          LlapInstance llapInstance = appStatusBuilder.removeAndgetLlapInstanceForContainer(containerIdString);
+          if (llapInstance != null) {
+            llapInstance.setMgmtPort(serviceInstance.getManagementPort());
+            llapInstance.setRpcPort(serviceInstance.getRpcPort());
+            llapInstance.setShufflePort(serviceInstance.getShufflePort());
+            llapInstance.setWebUrl(serviceInstance.getServicesAddress());
+            llapInstance.setStatusUrl(serviceInstance.getServicesAddress() + "/status");
+            validatedInstances.add(llapInstance);
+          } else {
+            // This likely indicates that an instance has recently restarted
+            // (the old instance has not been unregistered), and the new instance has not registered yet.
+            llapExtraInstances.add(containerIdString);
+            // This instance will not be added back, since its services are not up yet.
+          }
+
+        }
+
+        appStatusBuilder.setLiveInstances(validatedInstances.size());
+        if (validatedInstances.size() >= appStatusBuilder.getDesiredInstances()) {
+          appStatusBuilder.setState(State.RUNNING_ALL);
+          if (validatedInstances.size() > appStatusBuilder.getDesiredInstances()) {
+            LOG.warn("Found more entries in LLAP registry, as compared to desired entries");
+          }
+        } else {
+          appStatusBuilder.setState(State.RUNNING_PARTIAL);
+        }
+
+        // At this point, everything that can be consumed from AppStatusBuilder has been consumed.
+        // Debug only
+        if (appStatusBuilder.allInstances().size() > 0) {
+          // Containers likely to come up soon.
+          LOG.debug("Potential instances starting up: {}", appStatusBuilder.allInstances());
+        }
+        if (llapExtraInstances.size() > 0) {
+          // Old containers which are likely shutting down
+          LOG.debug("Instances likely to shutdown soon: {}", llapExtraInstances);
+        }
+
+        appStatusBuilder.clearAndAddPreviouslyKnownInstances(validatedInstances);
+
+      }
+      return ExitCode.SUCCESS;
+    } finally {
+      llapRegistry.stop();
+    }
+
+  }
+
+
+  static final class AppStatusBuilder {
+
+    private AmInfo amInfo;
+    private State state = State.UNKNOWN;
+    private String originalConfigurationPath;
+    private String generatedConfigurationPath;
+
+    private Integer desiredInstances;
+    private Integer liveInstances;
+
+    private Long appStartTime;
+    private Long appFinishTime;
+
+    private List<LlapInstance> llapInstances = new LinkedList<>();
+
+    private transient Map<String, LlapInstance> containerToInstanceMap = new HashMap<>();
+
+    public void setAmInfo(AmInfo amInfo) {
+      this.amInfo = amInfo;
+    }
+
+    public AppStatusBuilder setState(
+        State state) {
+      this.state = state;
+      return this;
+    }
+
+    public AppStatusBuilder setOriginalConfigurationPath(String originalConfigurationPath) {
+      this.originalConfigurationPath = originalConfigurationPath;
+      return this;
+    }
+
+    public AppStatusBuilder setGeneratedConfigurationPath(String generatedConfigurationPath) {
+      this.generatedConfigurationPath = generatedConfigurationPath;
+      return this;
+    }
+
+    public AppStatusBuilder setAppStartTime(long appStartTime) {
+      this.appStartTime = appStartTime;
+      return this;
+    }
+
+    public AppStatusBuilder setAppFinishTime(long finishTime) {
+      this.appFinishTime = finishTime;
+      return this;
+    }
+
+    public AppStatusBuilder setDesiredInstances(int desiredInstances) {
+      this.desiredInstances = desiredInstances;
+      return this;
+    }
+
+    public AppStatusBuilder setLiveInstances(int liveInstances) {
+      this.liveInstances = liveInstances;
+      return this;
+    }
+
+    public AppStatusBuilder addNewLlapInstance(LlapInstance llapInstance) {
+      this.llapInstances.add(llapInstance);
+      this.containerToInstanceMap.put(llapInstance.getContainerId(), llapInstance);
+      return this;
+    }
+
+    public LlapInstance removeAndGetLlapInstanceForContainer(String containerIdString) {
+      return containerToInstanceMap.remove(containerIdString);
+    }
+
+    public void clearLlapInstances() {
+      this.llapInstances.clear();
+      this.containerToInstanceMap.clear();
+    }
+
+    public AppStatusBuilder clearAndAddPreviouslyKnownInstances(List<LlapInstance> llapInstances) {
+      clearLlapInstances();
+      for (LlapInstance llapInstance : llapInstances) {
+        addNewLlapInstance(llapInstance);
+      }
+      return this;
+    }
+
+    @JsonIgnore
+    public List<LlapInstance> allInstances() {
+      return this.llapInstances;
+    }
+
+    public AmInfo getAmInfo() {
+      return amInfo;
+    }
+
+    public State getState() {
+      return state;
+    }
+
+    public String getOriginalConfigurationPath() {
+      return originalConfigurationPath;
+    }
+
+    public String getGeneratedConfigurationPath() {
+      return generatedConfigurationPath;
+    }
+
+    public Integer getDesiredInstances() {
+      return desiredInstances;
+    }
+
+    public Integer getLiveInstances() {
+      return liveInstances;
+    }
+
+    public Long getAppStartTime() {
+      return appStartTime;
+    }
+
+    public Long getAppFinishTime() {
+      return appFinishTime;
+    }
+
+    public List<LlapInstance> getLlapInstances() {
+      return llapInstances;
+    }
+
+    @JsonIgnore
+    public AmInfo maybeCreateAndGetAmInfo() {
+      if (amInfo == null) {
+        amInfo = new AmInfo();
+      }
+      return amInfo;
+    }
+
+    @Override
+    public String toString() {
+      return "AppStatusBuilder{" +
+          "amInfo=" + amInfo +
+          ", state=" + state +
+          ", originalConfigurationPath='" + originalConfigurationPath + '\'' +
+          ", generatedConfigurationPath='" + generatedConfigurationPath + '\'' +
+          ", desiredInstances=" + desiredInstances +
+          ", liveInstances=" + liveInstances +
+          ", appStartTime=" + appStartTime +
+          ", appFinishTime=" + appFinishTime +
+          ", llapInstances=" + llapInstances +
+          ", containerToInstanceMap=" + containerToInstanceMap +
+          '}';
+    }
+  }
+
+  static class AmInfo {
+    private String appName;
+    private String appType;
+    private String appId;
+    private String containerId;
+    private String hostname;
+    private String amWebUrl;
+
+    public AmInfo setAppName(String appName) {
+      this.appName = appName;
+      return this;
+    }
+
+    public AmInfo setAppType(String appType) {
+      this.appType = appType;
+      return this;
+    }
+
+    public AmInfo setAppId(String appId) {
+      this.appId = appId;
+      return this;
+    }
+
+    public AmInfo setContainerId(String containerId) {
+      this.containerId = containerId;
+      return this;
+    }
+
+    public AmInfo setHostname(String hostname) {
+      this.hostname = hostname;
+      return this;
+    }
+
+    public AmInfo setAmWebUrl(String amWebUrl) {
+      this.amWebUrl = amWebUrl;
+      return this;
+    }
+
+    public String getAppName() {
+      return appName;
+    }
+
+    public String getAppType() {
+      return appType;
+    }
+
+    public String getAppId() {
+      return appId;
+    }
+
+    public String getContainerId() {
+      return containerId;
+    }
+
+    public String getHostname() {
+      return hostname;
+    }
+
+    public String getAmWebUrl() {
+      return amWebUrl;
+    }
+
+    @Override
+    public String toString() {
+      return "AmInfo{" +
+          "appName='" + appName + '\'' +
+          ", appType='" + appType + '\'' +
+          ", appId='" + appId + '\'' +
+          ", containerId='" + containerId + '\'' +
+          ", hostname='" + hostname + '\'' +
+          ", amWebUrl='" + amWebUrl + '\'' +
+          '}';
+    }
+  }
+
+  static class LlapInstance {
+    private final String hostname;
+    private final String containerId;
+    private String statusUrl;
+    private String webUrl;
+    private Integer rpcPort;
+    private Integer mgmtPort;
+    private Integer shufflePort;
+
+    // TODO HIVE-13454 Add additional information such as #executors, container size, etc
+
+    public LlapInstance(String hostname, String containerId) {
+      this.hostname = hostname;
+      this.containerId = containerId;
+    }
+
+    public LlapInstance setWebUrl(String webUrl) {
+      this.webUrl = webUrl;
+      return this;
+    }
+
+    public LlapInstance setStatusUrl(String statusUrl) {
+      this.statusUrl = statusUrl;
+      return this;
+    }
+
+    public LlapInstance setRpcPort(int rpcPort) {
+      this.rpcPort = rpcPort;
+      return this;
+    }
+
+    public LlapInstance setMgmtPort(int mgmtPort) {
+      this.mgmtPort = mgmtPort;
+      return this;
+    }
+
+    public LlapInstance setShufflePort(int shufflePort) {
+      this.shufflePort = shufflePort;
+      return this;
+    }
+
+    public String getHostname() {
+      return hostname;
+    }
+
+    public String getStatusUrl() {
+      return statusUrl;
+    }
+
+    public String getContainerId() {
+      return containerId;
+    }
+
+    public String getWebUrl() {
+      return webUrl;
+    }
+
+    public Integer getRpcPort() {
+      return rpcPort;
+    }
+
+    public Integer getMgmtPort() {
+      return mgmtPort;
+    }
+
+    public Integer getShufflePort() {
+      return shufflePort;
+    }
+
+    @Override
+    public String toString() {
+      return "LlapInstance{" +
+          "hostname='" + hostname + '\'' +
+          ", containerId='" + containerId + '\'' +
+          ", statusUrl='" + statusUrl + '\'' +
+          ", webUrl='" + webUrl + '\'' +
+          ", rpcPort=" + rpcPort +
+          ", mgmtPort=" + mgmtPort +
+          ", shufflePort=" + shufflePort +
+          '}';
+    }
+  }
+
+  static class LlapStatusCliException extends Exception {
+    final ExitCode exitCode;
+
+
+    public LlapStatusCliException(ExitCode exitCode, String message) {
+      super(exitCode.getInt() +": " + message);
+      this.exitCode = exitCode;
+    }
+
+    public LlapStatusCliException(ExitCode exitCode, String message, Throwable cause) {
+      super(message, cause);
+      this.exitCode = exitCode;
+    }
+
+    public ExitCode getExitCode() {
+      return exitCode;
+    }
+  }
+
+  enum State {
+    APP_NOT_FOUND, LAUNCHING,
+    RUNNING_PARTIAL,
+    RUNNING_ALL, COMPLETE, UNKNOWN
+  }
+
+  enum ExitCode {
+    SUCCESS(0),
+    INCORRECT_USAGE(10),
+    YARN_ERROR(20),
+    SLIDER_CLIENT_ERROR_CREATE_FAILED(30),
+    SLIDER_CLIENT_ERROR_OTHER(31),
+    LLAP_REGISTRY_ERROR(40),
+    LLAP_JSON_GENERATION_ERROR(50),
+    // Error in the script itself - likely caused by an incompatible change or newly added functionality/states.
+    INTERNAL_ERROR(100);
+
+    private final int exitCode;
+
+    ExitCode(int exitCode) {
+      this.exitCode = exitCode;
+    }
+
+    public int getInt() {
+      return exitCode;
+    }
+  }
+
+
+  private static void logError(Throwable t) {
+    LOG.error("FAILED: " + t.getMessage(), t);
+    System.err.println("FAILED: " + t.getMessage());
+  }
+
+
+  public static void main(String[] args) {
+    int ret;
+    try {
+      LlapStatusServiceDriver statusServiceDriver = new LlapStatusServiceDriver();
+      ret = statusServiceDriver.run(args);
+      if (ret == ExitCode.SUCCESS.getInt()) {
+        statusServiceDriver.outputJson();
+      }
+
+    } catch (Throwable t) {
+      logError(t);
+      ret = ExitCode.INTERNAL_ERROR.getInt();
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Completed processing - exiting with " + ret);
+    }
+    System.exit(ret);
+  }
+}
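
As a minimal usage sketch of the builder above (illustrative only, not part of the patch): the driver first registers one LlapInstance per container reported by Slider and then reconciles that set against the LLAP registry. The host names, container ids and counts here are made up.

    AppStatusBuilder status = new AppStatusBuilder();
    status.maybeCreateAndGetAmInfo().setAppName("llap0").setAppId("application_1_0001");
    status.setDesiredInstances(2);
    // One instance per container reported by Slider.
    status.addNewLlapInstance(new LlapInstance("host-1", "container_1_0001_01_000002"));
    status.addNewLlapInstance(new LlapInstance("host-2", "container_1_0001_01_000003"));
    // After reconciling with the registry (the loop above), only validated
    // instances are kept and the state reflects how many actually came up.
    status.setLiveInstances(1);
    status.setState(State.RUNNING_PARTIAL);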

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index 768aa8a..a0250cb 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.JvmPauseMonitor;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hive.common.util.ShutdownHookManager;
 import org.apache.logging.log4j.core.config.Configurator;
 import org.slf4j.Logger;
@@ -353,6 +354,14 @@ public class LlapDaemon extends CompositeService implements ContainerRunner, Lla
       // Cache settings will need to be setup in llap-daemon-site.xml - since the daemons don't read hive-site.xml
       // Ideally, these properties should be part of LlapDameonConf rather than HiveConf
       LlapDaemonConfiguration daemonConf = new LlapDaemonConfiguration();
+
+      String containerIdStr = System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name());
+      if (containerIdStr != null && !containerIdStr.isEmpty()) {
+        daemonConf.set(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname, containerIdStr);
+      } else {
+        daemonConf.unset(ConfVars.LLAP_DAEMON_CONTAINER_ID.varname);
+      }
+
       int numExecutors = HiveConf.getIntVar(daemonConf, ConfVars.LLAP_DAEMON_NUM_EXECUTORS);
 
       String localDirList = LlapUtil.getDaemonLocalDirList(daemonConf);
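
For reference, the same environment-to-configuration hand-off in isolation; this is only a sketch assuming a YARN-launched process, and the helper class and method names are illustrative rather than part of this change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.ApplicationConstants;

    final class ContainerIdPropagation {
      // Copies the YARN container id from the environment into the given
      // configuration, or clears any stale value if the variable is absent.
      static void propagate(Configuration conf, String confKey) {
        String containerIdStr =
            System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name());
        if (containerIdStr != null && !containerIdStr.isEmpty()) {
          conf.set(confKey, containerIdStr);
        } else {
          conf.unset(confKey);
        }
      }
    }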

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/llap-server/src/main/resources/llap-cli-log4j2.properties
----------------------------------------------------------------------
diff --git a/llap-server/src/main/resources/llap-cli-log4j2.properties b/llap-server/src/main/resources/llap-cli-log4j2.properties
index 7542193..a141042 100644
--- a/llap-server/src/main/resources/llap-cli-log4j2.properties
+++ b/llap-server/src/main/resources/llap-cli-log4j2.properties
@@ -51,13 +51,10 @@ appender.DRFA.strategy.type = DefaultRolloverStrategy
 appender.DRFA.strategy.max = 30
 
 # list of all loggers
-loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HadoopConf
+loggers = ZooKeeper, DataNucleus, Datastore, JPOX, HadoopConf
 
-logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
-logger.NIOServerCnxn.level = WARN
-
-logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
-logger.ClientCnxnSocketNIO.level = WARN
+logger.ZooKeeper.name = org.apache.zookeeper
+logger.ZooKeeper.level = WARN
 
 logger.DataNucleus.name = DataNucleus
 logger.DataNucleus.level = ERROR

http://git-wip-us.apache.org/repos/asf/hive/blob/af4be3de/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 2337e89..77cfaeb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -170,6 +170,7 @@
     <slf4j.version>1.7.5</slf4j.version>
     <ST4.version>4.0.4</ST4.version>
     <tez.version>0.8.2</tez.version>
+    <slider.version>0.90.2-incubating</slider.version>
     <super-csv.version>2.2.0</super-csv.version>
     <spark.version>1.6.0</spark.version>
     <scala.binary.version>2.10</scala.binary.version>


[40/58] [abbrv] hive git commit: HIVE-12159: Create vectorized readers for the complex types (Owen O'Malley, reviewed by Matt McCline)

Posted by jd...@apache.org.
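
The diff below changes the TreeReader contract: nextVector no longer allocates and returns an Object, it fills a caller-supplied ColumnVector and honours a parent-provided null mask, and a top-level nextBatch entry point resets and fills the batch columns in place. A rough calling sketch, assuming a reader has already been built for the given TypeDescription (construction omitted); the class name BatchReadSketch and the row-count bookkeeping are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
    import org.apache.hadoop.hive.ql.io.orc.TreeReaderFactory;
    import org.apache.orc.TypeDescription;

    final class BatchReadSketch {
      // Drives a top-level reader through the new void-returning API.
      static void readAll(TreeReaderFactory.TreeReader reader,
                          TypeDescription schema, long totalRows) throws IOException {
        VectorizedRowBatch batch = schema.createRowBatch();  // pre-allocated column vectors
        long remaining = totalRows;
        while (remaining > 0) {
          int n = (int) Math.min(batch.getMaxSize(), remaining);
          reader.nextBatch(batch, n);   // fills batch.cols in place, setting nulls
          batch.size = n;
          remaining -= n;
          // ... consume the batch here ...
        }
      }
    }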
http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
index 8bb32ea..8ee8cd7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TreeReaderFactory.java
@@ -24,6 +24,7 @@ import java.sql.Timestamp;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -35,9 +36,12 @@ import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.ColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.ListColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.TimestampUtils;
+import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
@@ -56,8 +60,7 @@ import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.orc.TypeDescription;
 import org.apache.orc.impl.BitFieldReader;
 import org.apache.orc.impl.DynamicByteArray;
 import org.apache.orc.impl.InStream;
@@ -75,60 +78,6 @@ import org.apache.orc.impl.StreamName;
  */
 public class TreeReaderFactory {
 
-  private static final Logger LOG =
-    LoggerFactory.getLogger(TreeReaderFactory.class);
-
-  public static class TreeReaderSchema {
-
-    /**
-     * The types in the ORC file.
-     */
-    List<OrcProto.Type> fileTypes;
-
-    /**
-     * The treeReaderSchema that the reader should read as.
-     */
-    List<OrcProto.Type> schemaTypes;
-
-    /**
-     * The subtype of the row STRUCT.  Different than 0 for ACID.
-     */
-    int innerStructSubtype;
-
-    public TreeReaderSchema() {
-      fileTypes = null;
-      schemaTypes = null;
-      innerStructSubtype = -1;
-    }
-
-    public TreeReaderSchema fileTypes(List<OrcProto.Type> fileTypes) {
-      this.fileTypes = fileTypes;
-      return this;
-    }
-
-    public TreeReaderSchema schemaTypes(List<OrcProto.Type> schemaTypes) {
-      this.schemaTypes = schemaTypes;
-      return this;
-    }
-
-    public TreeReaderSchema innerStructSubtype(int innerStructSubtype) {
-      this.innerStructSubtype = innerStructSubtype;
-      return this;
-    }
-
-    public List<OrcProto.Type> getFileTypes() {
-      return fileTypes;
-    }
-
-    public List<OrcProto.Type> getSchemaTypes() {
-      return schemaTypes;
-    }
-
-    public int getInnerStructSubtype() {
-      return innerStructSubtype;
-    }
-  }
-
   public abstract static class TreeReader {
     protected final int columnId;
     protected BitFieldReader present = null;
@@ -230,36 +179,60 @@ public class TreeReaderFactory {
     }
 
     /**
+     * Called at the top level to read into the given batch.
+     * @param batch the batch to read into
+     * @param batchSize the number of rows to read
+     * @throws IOException
+     */
+    public void nextBatch(VectorizedRowBatch batch,
+                          int batchSize) throws IOException {
+      batch.cols[0].reset();
+      batch.cols[0].ensureSize(batchSize, false);
+      nextVector(batch.cols[0], null, batchSize);
+    }
+
+    /**
      * Populates the isNull vector array in the previousVector object based on
      * the present stream values. This function is called from all the child
      * readers, and they all set the values based on isNull field value.
      *
-     * @param previousVector The columnVector object whose isNull value is populated
+     * @param previous The columnVector object whose isNull value is populated
+     * @param isNull Whether each value was null at a higher level. If
+     *               isNull is null, all values are non-null.
      * @param batchSize      Size of the column vector
-     * @return next column vector
      * @throws IOException
      */
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      ColumnVector result = (ColumnVector) previousVector;
-      if (present != null) {
+    public void nextVector(ColumnVector previous,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      if (present != null || isNull != null) {
         // Set noNulls and isNull vector of the ColumnVector based on
         // present stream
-        result.noNulls = true;
+        previous.noNulls = true;
+        boolean allNull = true;
         for (int i = 0; i < batchSize; i++) {
-          result.isNull[i] = (present.next() != 1);
-          if (result.noNulls && result.isNull[i]) {
-            result.noNulls = false;
+          if (isNull == null || !isNull[i]) {
+            if (present != null && present.next() != 1) {
+              previous.noNulls = false;
+              previous.isNull[i] = true;
+            } else {
+              previous.isNull[i] = false;
+              allNull = false;
+            }
+          } else {
+            previous.noNulls = false;
+            previous.isNull[i] = true;
           }
         }
+        previous.isRepeating = !previous.noNulls && allNull;
       } else {
-        // There is not present stream, this means that all the values are
+        // There is no present stream, which means that all the values are
         // present.
-        result.noNulls = true;
+        previous.noNulls = true;
         for (int i = 0; i < batchSize; i++) {
-          result.isNull[i] = false;
+          previous.isNull[i] = false;
         }
       }
-      return previousVector;
     }
 
     public BitFieldReader getPresent() {
@@ -267,6 +240,46 @@ public class TreeReaderFactory {
     }
   }
 
+  public static class NullTreeReader extends TreeReader {
+
+    public NullTreeReader(int columnId) throws IOException {
+      super(columnId);
+    }
+
+    @Override
+    public void startStripe(Map<StreamName, InStream> streams,
+                            OrcProto.StripeFooter footer) {
+      // PASS
+    }
+
+    @Override
+    void skipRows(long rows) {
+      // PASS
+    }
+
+    @Override
+    public void seek(PositionProvider position) {
+      // PASS
+    }
+
+    @Override
+    public void seek(PositionProvider[] position) {
+      // PASS
+    }
+
+    @Override
+    Object next(Object previous) {
+      return null;
+    }
+
+    @Override
+    public void nextVector(ColumnVector vector, boolean[] isNull, int size) {
+      vector.noNulls = false;
+      vector.isNull[0] = true;
+      vector.isRepeating = true;
+    }
+  }
+
   public static class BooleanTreeReader extends TreeReader {
     protected BitFieldReader reader = null;
 
@@ -322,20 +335,16 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final LongColumnVector result;
-      if (previousVector == null) {
-        result = new LongColumnVector();
-      } else {
-        result = (LongColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      LongColumnVector result = (LongColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       // Read value entries based on isNull entries
       reader.nextVector(result, batchSize);
-      return result;
     }
   }
 
@@ -387,20 +396,16 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final LongColumnVector result;
-      if (previousVector == null) {
-        result = new LongColumnVector();
-      } else {
-        result = (LongColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final LongColumnVector result = (LongColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       // Read value entries based on isNull entries
-      reader.nextVector(result, batchSize);
-      return result;
+      reader.nextVector(result, result.vector, batchSize);
     }
 
     @Override
@@ -473,20 +478,16 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final LongColumnVector result;
-      if (previousVector == null) {
-        result = new LongColumnVector();
-      } else {
-        result = (LongColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final LongColumnVector result = (LongColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       // Read value entries based on isNull entries
-      reader.nextVector(result, batchSize);
-      return result;
+      reader.nextVector(result, result.vector, batchSize);
     }
 
     @Override
@@ -559,20 +560,16 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final LongColumnVector result;
-      if (previousVector == null) {
-        result = new LongColumnVector();
-      } else {
-        result = (LongColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final LongColumnVector result = (LongColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       // Read value entries based on isNull entries
-      reader.nextVector(result, batchSize);
-      return result;
+      reader.nextVector(result, result.vector, batchSize);
     }
 
     @Override
@@ -646,20 +643,16 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final LongColumnVector result;
-      if (previousVector == null) {
-        result = new LongColumnVector();
-      } else {
-        result = (LongColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final LongColumnVector result = (LongColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       // Read value entries based on isNull entries
-      reader.nextVector(result, batchSize);
-      return result;
+      reader.nextVector(result, result.vector, batchSize);
     }
 
     @Override
@@ -719,16 +712,13 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final DoubleColumnVector result;
-      if (previousVector == null) {
-        result = new DoubleColumnVector();
-      } else {
-        result = (DoubleColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final DoubleColumnVector result = (DoubleColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       final boolean hasNulls = !result.noNulls;
       boolean allNulls = hasNulls;
@@ -768,7 +758,6 @@ public class TreeReaderFactory {
         }
         result.isRepeating = repeating;
       }
-      return result;
     }
 
     @Override
@@ -832,16 +821,13 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final DoubleColumnVector result;
-      if (previousVector == null) {
-        result = new DoubleColumnVector();
-      } else {
-        result = (DoubleColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final DoubleColumnVector result = (DoubleColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       final boolean hasNulls = !result.noNulls;
       boolean allNulls = hasNulls;
@@ -881,8 +867,6 @@ public class TreeReaderFactory {
         }
         result.isRepeating = repeating;
       }
-
-      return result;
     }
 
     @Override
@@ -974,19 +958,15 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final BytesColumnVector result;
-      if (previousVector == null) {
-        result = new BytesColumnVector();
-      } else {
-        result = (BytesColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final BytesColumnVector result = (BytesColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       BytesColumnVectorUtil.readOrcByteArrays(stream, lengths, scratchlcv, result, batchSize);
-      return result;
     }
 
     @Override
@@ -1011,7 +991,6 @@ public class TreeReaderFactory {
     private final TimeZone readerTimeZone;
     private TimeZone writerTimeZone;
     private boolean hasSameTZRules;
-    private TimestampWritable scratchTimestampWritable;
 
     TimestampTreeReader(int columnId, boolean skipCorrupt) throws IOException {
       this(columnId, null, null, null, null, skipCorrupt);
@@ -1115,9 +1094,9 @@ public class TreeReaderFactory {
         int newNanos = parseNanos(nanos.next());
         // fix the rounding when we divided by 1000.
         if (millis >= 0) {
-          millis += newNanos / 1000000;
+          millis += newNanos / WriterImpl.NANOS_PER_MILLI;
         } else {
-          millis -= newNanos / 1000000;
+          millis -= newNanos / WriterImpl.NANOS_PER_MILLI;
         }
         long offset = 0;
         // If reader and writer time zones have different rules, adjust the timezone difference
@@ -1144,31 +1123,45 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final TimestampColumnVector result;
-      if (previousVector == null) {
-        result = new TimestampColumnVector();
-      } else {
-        result = (TimestampColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      TimestampColumnVector result = (TimestampColumnVector) previousVector;
+      super.nextVector(previousVector, isNull, batchSize);
 
-      result.reset();
-      if (scratchTimestampWritable == null) {
-        scratchTimestampWritable = new TimestampWritable();
-      }
-      Object obj;
       for (int i = 0; i < batchSize; i++) {
-        obj = next(scratchTimestampWritable);
-        if (obj == null) {
-          result.noNulls = false;
-          result.isNull[i] = true;
-        } else {
-          TimestampWritable writable = (TimestampWritable) obj;
-          result.set(i, writable.getTimestamp());
+        if (result.noNulls || !result.isNull[i]) {
+          long millis = data.next() + base_timestamp;
+          int newNanos = parseNanos(nanos.next());
+          if (millis < 0 && newNanos != 0) {
+            millis -= 1;
+          }
+          millis *= WriterImpl.MILLIS_PER_SECOND;
+          long offset = 0;
+          // If reader and writer time zones have different rules, adjust the timezone difference
+          // between reader and writer taking day light savings into account.
+          if (!hasSameTZRules) {
+            offset = writerTimeZone.getOffset(millis) - readerTimeZone.getOffset(millis);
+          }
+          long adjustedMillis = millis + offset;
+          // Sometimes the reader timezone might have changed after adding the adjustedMillis.
+          // To account for that change, check for any difference in reader timezone after
+          // adding adjustedMillis. If so use the new offset (offset at adjustedMillis point of time).
+          if (!hasSameTZRules &&
+              (readerTimeZone.getOffset(millis) != readerTimeZone.getOffset(adjustedMillis))) {
+            long newOffset =
+                writerTimeZone.getOffset(millis) - readerTimeZone.getOffset(adjustedMillis);
+            adjustedMillis = millis + newOffset;
+          }
+          result.time[i] = adjustedMillis;
+          result.nanos[i] = newNanos;
+          if (result.isRepeating && i != 0 &&
+              (result.time[0] != result.time[i] ||
+                  result.nanos[0] != result.nanos[i])) {
+            result.isRepeating = false;
+          }
         }
       }
-
-      return result;
     }
 
     private static int parseNanos(long serialized) {
@@ -1253,20 +1246,16 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final LongColumnVector result;
-      if (previousVector == null) {
-        result = new LongColumnVector();
-      } else {
-        result = (LongColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final LongColumnVector result = (LongColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       // Read value entries based on isNull entries
-      reader.nextVector(result, batchSize);
-      return result;
+      reader.nextVector(result, result.vector, batchSize);
     }
 
     @Override
@@ -1278,7 +1267,7 @@ public class TreeReaderFactory {
   public static class DecimalTreeReader extends TreeReader {
     protected InStream valueStream;
     protected IntegerReader scaleReader = null;
-    private LongColumnVector scratchScaleVector;
+    private int[] scratchScaleVector;
 
     private final int precision;
     private final int scale;
@@ -1293,7 +1282,7 @@ public class TreeReaderFactory {
       super(columnId, present);
       this.precision = precision;
       this.scale = scale;
-      this.scratchScaleVector = new LongColumnVector(VectorizedRowBatch.DEFAULT_SIZE);
+      this.scratchScaleVector = new int[VectorizedRowBatch.DEFAULT_SIZE];
       this.valueStream = valueStream;
       if (scaleStream != null && encoding != null) {
         checkEncoding(encoding);
@@ -1352,46 +1341,34 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final DecimalColumnVector result;
-      if (previousVector == null) {
-        result = new DecimalColumnVector(precision, scale);
-      } else {
-        result = (DecimalColumnVector) previousVector;
-      }
-
-      // Save the reference for isNull in the scratch vector
-      boolean[] scratchIsNull = scratchScaleVector.isNull;
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final DecimalColumnVector result = (DecimalColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
+      if (batchSize > scratchScaleVector.length) {
+        scratchScaleVector = new int[(int) batchSize];
+      }
+      scaleReader.nextVector(result, scratchScaleVector, batchSize);
       // Read value entries based on isNull entries
-      if (result.isRepeating) {
-        if (!result.isNull[0]) {
+      if (result.noNulls) {
+        for (int r=0; r < batchSize; ++r) {
           BigInteger bInt = SerializationUtils.readBigInteger(valueStream);
-          short scaleInData = (short) scaleReader.next();
-          HiveDecimal dec = HiveDecimal.create(bInt, scaleInData);
-          dec = HiveDecimal.enforcePrecisionScale(dec, precision, scale);
-          result.set(0, dec);
+          HiveDecimal dec = HiveDecimal.create(bInt, scratchScaleVector[r]);
+          result.set(r, dec);
         }
-      } else {
-        // result vector has isNull values set, use the same to read scale vector.
-        scratchScaleVector.isNull = result.isNull;
-        scaleReader.nextVector(scratchScaleVector, batchSize);
-        for (int i = 0; i < batchSize; i++) {
-          if (!result.isNull[i]) {
+      } else if (!result.isRepeating || !result.isNull[0]) {
+        for (int r=0; r < batchSize; ++r) {
+          if (!result.isNull[r]) {
             BigInteger bInt = SerializationUtils.readBigInteger(valueStream);
-            short scaleInData = (short) scratchScaleVector.vector[i];
-            HiveDecimal dec = HiveDecimal.create(bInt, scaleInData);
-            dec = HiveDecimal.enforcePrecisionScale(dec, precision, scale);
-            result.set(i, dec);
+            HiveDecimal dec = HiveDecimal.create(bInt, scratchScaleVector[r]);
+            result.set(r, dec);
           }
         }
       }
-      // Switch back the null vector.
-      scratchScaleVector.isNull = scratchIsNull;
-      return result;
     }
 
     @Override
@@ -1481,8 +1458,10 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      return reader.nextVector(previousVector, batchSize);
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      reader.nextVector(previousVector, isNull, batchSize);
     }
 
     @Override
@@ -1501,7 +1480,7 @@ public class TreeReaderFactory {
         BytesColumnVector result, final int batchSize) throws IOException {
       // Read lengths
       scratchlcv.isNull = result.isNull;  // Notice we are replacing the isNull vector here...
-      lengths.nextVector(scratchlcv, batchSize);
+      lengths.nextVector(scratchlcv, scratchlcv.vector, batchSize);
       int totalLength = 0;
       if (!scratchlcv.isRepeating) {
         for (int i = 0; i < batchSize; i++) {
@@ -1532,31 +1511,35 @@ public class TreeReaderFactory {
     }
 
     // This method has the common code for reading in bytes into a BytesColumnVector.
-    public static void readOrcByteArrays(InStream stream, IntegerReader lengths,
-        LongColumnVector scratchlcv,
-        BytesColumnVector result, final int batchSize) throws IOException {
-
-      byte[] allBytes = commonReadByteArrays(stream, lengths, scratchlcv, result, batchSize);
-
-      // Too expensive to figure out 'repeating' by comparisons.
-      result.isRepeating = false;
-      int offset = 0;
-      if (!scratchlcv.isRepeating) {
-        for (int i = 0; i < batchSize; i++) {
-          if (!scratchlcv.isNull[i]) {
-            result.setRef(i, allBytes, offset, (int) scratchlcv.vector[i]);
-            offset += scratchlcv.vector[i];
-          } else {
-            result.setRef(i, allBytes, 0, 0);
+    public static void readOrcByteArrays(InStream stream,
+                                         IntegerReader lengths,
+                                         LongColumnVector scratchlcv,
+                                         BytesColumnVector result,
+                                         int batchSize) throws IOException {
+      if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
+        byte[] allBytes = commonReadByteArrays(stream, lengths, scratchlcv,
+            result, (int) batchSize);
+
+        // Too expensive to figure out 'repeating' by comparisons.
+        result.isRepeating = false;
+        int offset = 0;
+        if (!scratchlcv.isRepeating) {
+          for (int i = 0; i < batchSize; i++) {
+            if (!scratchlcv.isNull[i]) {
+              result.setRef(i, allBytes, offset, (int) scratchlcv.vector[i]);
+              offset += scratchlcv.vector[i];
+            } else {
+              result.setRef(i, allBytes, 0, 0);
+            }
           }
-        }
-      } else {
-        for (int i = 0; i < batchSize; i++) {
-          if (!scratchlcv.isNull[i]) {
-            result.setRef(i, allBytes, offset, (int) scratchlcv.vector[0]);
-            offset += scratchlcv.vector[0];
-          } else {
-            result.setRef(i, allBytes, 0, 0);
+        } else {
+          for (int i = 0; i < batchSize; i++) {
+            if (!scratchlcv.isNull[i]) {
+              result.setRef(i, allBytes, offset, (int) scratchlcv.vector[0]);
+              offset += scratchlcv.vector[0];
+            } else {
+              result.setRef(i, allBytes, 0, 0);
+            }
           }
         }
       }
@@ -1641,19 +1624,16 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final BytesColumnVector result;
-      if (previousVector == null) {
-        result = new BytesColumnVector();
-      } else {
-        result = (BytesColumnVector) previousVector;
-      }
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final BytesColumnVector result = (BytesColumnVector) previousVector;
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
-      BytesColumnVectorUtil.readOrcByteArrays(stream, lengths, scratchlcv, result, batchSize);
-      return result;
+      BytesColumnVectorUtil.readOrcByteArrays(stream, lengths, scratchlcv,
+          result, batchSize);
     }
 
     @Override
@@ -1816,18 +1796,15 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final BytesColumnVector result;
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      final BytesColumnVector result = (BytesColumnVector) previousVector;
       int offset;
       int length;
-      if (previousVector == null) {
-        result = new BytesColumnVector();
-      } else {
-        result = (BytesColumnVector) previousVector;
-      }
 
       // Read present/isNull stream
-      super.nextVector(result, batchSize);
+      super.nextVector(result, isNull, batchSize);
 
       if (dictionaryBuffer != null) {
 
@@ -1838,7 +1815,8 @@ public class TreeReaderFactory {
 
         // Read string offsets
         scratchlcv.isNull = result.isNull;
-        reader.nextVector(scratchlcv, batchSize);
+        scratchlcv.ensureSize((int) batchSize, false);
+        reader.nextVector(scratchlcv, scratchlcv.vector, batchSize);
         if (!scratchlcv.isRepeating) {
 
           // The vector has non-repeating strings. Iterate thru the batch
@@ -1878,7 +1856,6 @@ public class TreeReaderFactory {
           }
         }
       }
-      return result;
     }
 
     int getDictionaryEntryLength(int entry, int offset) {
@@ -1936,11 +1913,13 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
       // Get the vector of strings from StringTreeReader, then make a 2nd pass to
       // adjust down the length (right trim and truncate) if necessary.
-      BytesColumnVector result = (BytesColumnVector) super.nextVector(previousVector, batchSize);
-
+      super.nextVector(previousVector, isNull, batchSize);
+      BytesColumnVector result = (BytesColumnVector) previousVector;
       int adjustedDownLen;
       if (result.isRepeating) {
         if (result.noNulls || !result.isNull[0]) {
@@ -1973,7 +1952,6 @@ public class TreeReaderFactory {
           }
         }
       }
-      return result;
     }
   }
 
@@ -2010,10 +1988,13 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
       // Get the vector of strings from StringTreeReader, then make a 2nd pass to
       // adjust down the length (truncate) if necessary.
-      BytesColumnVector result = (BytesColumnVector) super.nextVector(previousVector, batchSize);
+      super.nextVector(previousVector, isNull, batchSize);
+      BytesColumnVector result = (BytesColumnVector) previousVector;
 
       int adjustedDownLen;
       if (result.isRepeating) {
@@ -2045,62 +2026,26 @@ public class TreeReaderFactory {
           }
         }
       }
-      return result;
     }
   }
 
   protected static class StructTreeReader extends TreeReader {
-    private final int readColumnCount;
-    private final int resultColumnCount;
     protected final TreeReader[] fields;
-    private final String[] fieldNames;
 
-    protected StructTreeReader(
-        int columnId,
-        TreeReaderSchema treeReaderSchema,
-        boolean[] included,
-        boolean skipCorrupt) throws IOException {
+    protected StructTreeReader(int columnId,
+                               TypeDescription readerSchema,
+                               SchemaEvolution treeReaderSchema,
+                               boolean[] included,
+                               boolean skipCorrupt) throws IOException {
       super(columnId);
 
-      OrcProto.Type fileStructType = treeReaderSchema.getFileTypes().get(columnId);
-
-      OrcProto.Type schemaStructType = treeReaderSchema.getSchemaTypes().get(columnId);
+      TypeDescription fileSchema = treeReaderSchema.getFileType(readerSchema);
 
-      readColumnCount = Math.min(fileStructType.getFieldNamesCount(), schemaStructType.getFieldNamesCount());
-
-      if (columnId == treeReaderSchema.getInnerStructSubtype()) {
-        // If there are more result columns than reader columns, we will default those additional
-        // columns to NULL.
-        resultColumnCount = schemaStructType.getFieldNamesCount();
-      } else {
-        resultColumnCount = readColumnCount;
-      }
-
-      this.fields = new TreeReader[readColumnCount];
-      this.fieldNames = new String[readColumnCount];
-
-      if (included == null) {
-        for (int i = 0; i < readColumnCount; ++i) {
-          int subtype = schemaStructType.getSubtypes(i);
-          this.fields[i] = createTreeReader(subtype, treeReaderSchema, included, skipCorrupt);
-          // Use the treeReaderSchema evolution name since file/reader types may not have the real column name.
-          this.fieldNames[i] = schemaStructType.getFieldNames(i);
-        }
-      } else {
-        for (int i = 0; i < readColumnCount; ++i) {
-          int subtype = schemaStructType.getSubtypes(i);
-          if (subtype >= included.length) {
-            throw new IOException("subtype " + subtype + " exceeds the included array size " +
-                included.length + " fileTypes " + treeReaderSchema.getFileTypes().toString() +
-                " schemaTypes " + treeReaderSchema.getSchemaTypes().toString() +
-                " innerStructSubtype " + treeReaderSchema.getInnerStructSubtype());
-          }
-          if (included[subtype]) {
-            this.fields[i] = createTreeReader(subtype, treeReaderSchema, included, skipCorrupt);
-          }
-          // Use the treeReaderSchema evolution name since file/reader types may not have the real column name.
-          this.fieldNames[i] = schemaStructType.getFieldNames(i);
-        }
+      List<TypeDescription> childrenTypes = readerSchema.getChildren();
+      this.fields = new TreeReader[childrenTypes.size()];
+      for (int i = 0; i < fields.length; ++i) {
+        TypeDescription subtype = childrenTypes.get(i);
+        this.fields[i] = createTreeReader(subtype, treeReaderSchema, included, skipCorrupt);
       }
     }
 
@@ -2120,65 +2065,52 @@ public class TreeReaderFactory {
       OrcStruct result = null;
       if (valuePresent) {
         if (previous == null) {
-          result = new OrcStruct(resultColumnCount);
+          result = new OrcStruct(fields.length);
         } else {
           result = (OrcStruct) previous;
 
           // If the input format was initialized with a file with a
           // different number of fields, the number of fields needs to
           // be updated to the correct number
-          if (result.getNumFields() != resultColumnCount) {
-            result.setNumFields(resultColumnCount);
-          }
+          result.setNumFields(fields.length);
         }
-        for (int i = 0; i < readColumnCount; ++i) {
+        for (int i = 0; i < fields.length; ++i) {
           if (fields[i] != null) {
             result.setFieldValue(i, fields[i].next(result.getFieldValue(i)));
           }
         }
-        if (resultColumnCount > readColumnCount) {
-          for (int i = readColumnCount; i < resultColumnCount; ++i) {
-            // Default new treeReaderSchema evolution fields to NULL.
-            result.setFieldValue(i, null);
-          }
-        }
       }
       return result;
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      final ColumnVector[] result;
-      if (previousVector == null) {
-        result = new ColumnVector[readColumnCount];
-      } else {
-        result = (ColumnVector[]) previousVector;
+    public void nextBatch(VectorizedRowBatch batch,
+                          int batchSize) throws IOException {
+      for(int i=0; i < fields.length &&
+          (vectorColumnCount == -1 || i < vectorColumnCount); ++i) {
+        batch.cols[i].reset();
+        batch.cols[i].ensureSize((int) batchSize, false);
+        fields[i].nextVector(batch.cols[i], null, batchSize);
       }
+    }
 
-      // Read all the members of struct as column vectors
-      for (int i = 0; i < readColumnCount; i++) {
-        if (fields[i] != null) {
-          if (result[i] == null) {
-            result[i] = (ColumnVector) fields[i].nextVector(null, batchSize);
-          } else {
-            fields[i].nextVector(result[i], batchSize);
-          }
-        }
-      }
+    @Override
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      super.nextVector(previousVector, isNull, batchSize);
+      StructColumnVector result = (StructColumnVector) previousVector;
+      if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
+        result.isRepeating = false;
 
-      // Default additional treeReaderSchema evolution fields to NULL.
-      if (vectorColumnCount != -1 && vectorColumnCount > readColumnCount) {
-        for (int i = readColumnCount; i < vectorColumnCount; ++i) {
-          ColumnVector colVector = result[i];
-          if (colVector != null) {
-            colVector.isRepeating = true;
-            colVector.noNulls = false;
-            colVector.isNull[0] = true;
+        // Read all the members of struct as column vectors
+        boolean[] mask = result.noNulls ? null : result.isNull;
+        for (int f = 0; f < fields.length; f++) {
+          if (fields[f] != null) {
+            fields[f].nextVector(result.fields[f], mask, batchSize);
           }
         }
       }
-
-      return result;
     }
 
     @Override
@@ -2208,19 +2140,18 @@ public class TreeReaderFactory {
     protected final TreeReader[] fields;
     protected RunLengthByteReader tags;
 
-    protected UnionTreeReader(int columnId,
-        TreeReaderSchema treeReaderSchema,
-        boolean[] included,
-        boolean skipCorrupt) throws IOException {
-      super(columnId);
-      OrcProto.Type type = treeReaderSchema.getSchemaTypes().get(columnId);
-      int fieldCount = type.getSubtypesCount();
+    protected UnionTreeReader(int fileColumn,
+                              TypeDescription readerSchema,
+                              SchemaEvolution treeReaderSchema,
+                              boolean[] included,
+                              boolean skipCorrupt) throws IOException {
+      super(fileColumn);
+      List<TypeDescription> childrenTypes = readerSchema.getChildren();
+      int fieldCount = childrenTypes.size();
       this.fields = new TreeReader[fieldCount];
       for (int i = 0; i < fieldCount; ++i) {
-        int subtype = type.getSubtypes(i);
-        if (included == null || included[subtype]) {
-          this.fields[i] = createTreeReader(subtype, treeReaderSchema, included, skipCorrupt);
-        }
+        TypeDescription subtype = childrenTypes.get(i);
+        this.fields[i] = createTreeReader(subtype, treeReaderSchema, included, skipCorrupt);
       }
     }
 
@@ -2252,9 +2183,25 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previousVector, final int batchSize) throws IOException {
-      throw new UnsupportedOperationException(
-          "NextVector is not supported operation for Union type");
+    public void nextVector(ColumnVector previousVector,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      UnionColumnVector result = (UnionColumnVector) previousVector;
+      super.nextVector(result, isNull, batchSize);
+      if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
+        result.isRepeating = false;
+        tags.nextVector(result.noNulls ? null : result.isNull, result.tags,
+            batchSize);
+        boolean[] ignore = new boolean[(int) batchSize];
+        for (int f = 0; f < result.fields.length; ++f) {
+          // build the ignore list for this tag
+          for (int r = 0; r < batchSize; ++r) {
+            ignore[r] = (!result.noNulls && result.isNull[r]) ||
+                result.tags[r] != f;
+          }
+          fields[f].nextVector(result.fields[f], ignore, batchSize);
+        }
+      }
     }
 
     @Override
@@ -2288,13 +2235,15 @@ public class TreeReaderFactory {
     protected final TreeReader elementReader;
     protected IntegerReader lengths = null;
 
-    protected ListTreeReader(int columnId,
-        TreeReaderSchema treeReaderSchema,
-        boolean[] included,
-        boolean skipCorrupt) throws IOException {
-      super(columnId);
-      OrcProto.Type type = treeReaderSchema.getSchemaTypes().get(columnId);
-      elementReader = createTreeReader(type.getSubtypes(0), treeReaderSchema, included, skipCorrupt);
+    protected ListTreeReader(int fileColumn,
+                             TypeDescription readerSchema,
+                             SchemaEvolution treeReaderSchema,
+                             boolean[] included,
+                             boolean skipCorrupt) throws IOException {
+      super(fileColumn);
+      TypeDescription elementType = readerSchema.getChildren().get(0);
+      elementReader = createTreeReader(elementType, treeReaderSchema, included,
+          skipCorrupt);
     }
 
     @Override
@@ -2335,9 +2284,27 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previous, final int batchSize) throws IOException {
-      throw new UnsupportedOperationException(
-          "NextVector is not supported operation for List type");
+    public void nextVector(ColumnVector previous,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      ListColumnVector result = (ListColumnVector) previous;
+      super.nextVector(result, isNull, batchSize);
+      // if we have some none-null values, then read them
+      if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
+        lengths.nextVector(result, result.lengths, batchSize);
+        // even with repeating lengths, the list doesn't repeat
+        result.isRepeating = false;
+        // build the offsets vector and figure out how many children to read
+        result.childCount = 0;
+        for (int r = 0; r < batchSize; ++r) {
+          if (result.noNulls || !result.isNull[r]) {
+            result.offsets[r] = result.childCount;
+            result.childCount += result.lengths[r];
+          }
+        }
+        result.child.ensureSize(result.childCount, false);
+        elementReader.nextVector(result.child, null, result.childCount);
+      }
     }
 
     @Override
@@ -2378,24 +2345,16 @@ public class TreeReaderFactory {
     protected final TreeReader valueReader;
     protected IntegerReader lengths = null;
 
-    protected MapTreeReader(int columnId,
-        TreeReaderSchema treeReaderSchema,
-        boolean[] included,
-        boolean skipCorrupt) throws IOException {
-      super(columnId);
-      OrcProto.Type type = treeReaderSchema.getSchemaTypes().get(columnId);
-      int keyColumn = type.getSubtypes(0);
-      int valueColumn = type.getSubtypes(1);
-      if (included == null || included[keyColumn]) {
-        keyReader = createTreeReader(keyColumn, treeReaderSchema, included, skipCorrupt);
-      } else {
-        keyReader = null;
-      }
-      if (included == null || included[valueColumn]) {
-        valueReader = createTreeReader(valueColumn, treeReaderSchema, included, skipCorrupt);
-      } else {
-        valueReader = null;
-      }
+    protected MapTreeReader(int fileColumn,
+                            TypeDescription readerSchema,
+                            SchemaEvolution treeReaderSchema,
+                            boolean[] included,
+                            boolean skipCorrupt) throws IOException {
+      super(fileColumn);
+      TypeDescription keyType = readerSchema.getChildren().get(0);
+      TypeDescription valueType = readerSchema.getChildren().get(1);
+      keyReader = createTreeReader(keyType, treeReaderSchema, included, skipCorrupt);
+      valueReader = createTreeReader(valueType, treeReaderSchema, included, skipCorrupt);
     }
 
     @Override
@@ -2429,9 +2388,28 @@ public class TreeReaderFactory {
     }
 
     @Override
-    public Object nextVector(Object previous, final int batchSize) throws IOException {
-      throw new UnsupportedOperationException(
-          "NextVector is not supported operation for Map type");
+    public void nextVector(ColumnVector previous,
+                           boolean[] isNull,
+                           int batchSize) throws IOException {
+      MapColumnVector result = (MapColumnVector) previous;
+      super.nextVector(result, isNull, batchSize);
+      if (result.noNulls || !(result.isRepeating && result.isNull[0])) {
+        lengths.nextVector(result, result.lengths, batchSize);
+        // even with repeating lengths, the map doesn't repeat
+        result.isRepeating = false;
+        // build the offsets vector and figure out how many children to read
+        result.childCount = 0;
+        for (int r = 0; r < batchSize; ++r) {
+          if (result.noNulls || !result.isNull[r]) {
+            result.offsets[r] = result.childCount;
+            result.childCount += result.lengths[r];
+          }
+        }
+        result.keys.ensureSize(result.childCount, false);
+        result.values.ensureSize(result.childCount, false);
+        keyReader.nextVector(result.keys, null, result.childCount);
+        valueReader.nextVector(result.values, null, result.childCount);
+      }
     }
 
     @Override
@@ -2471,61 +2449,61 @@ public class TreeReaderFactory {
     }
   }
 
-  public static TreeReader createTreeReader(int columnId,
-      TreeReaderSchema treeReaderSchema,
-      boolean[] included,
-      boolean skipCorrupt
-  ) throws IOException {
-    OrcProto.Type type = treeReaderSchema.getSchemaTypes().get(columnId);
-    switch (type.getKind()) {
+  public static TreeReader createTreeReader(TypeDescription readerType,
+                                            SchemaEvolution evolution,
+                                            boolean[] included,
+                                            boolean skipCorrupt
+                                            ) throws IOException {
+    TypeDescription fileType = evolution.getFileType(readerType);
+    if (fileType == null ||
+        (included != null && !included[readerType.getId()])) {
+      return new NullTreeReader(0);
+    }
+    switch (readerType.getCategory()) {
       case BOOLEAN:
-        return new BooleanTreeReader(columnId);
+        return new BooleanTreeReader(fileType.getId());
       case BYTE:
-        return new ByteTreeReader(columnId);
+        return new ByteTreeReader(fileType.getId());
       case DOUBLE:
-        return new DoubleTreeReader(columnId);
+        return new DoubleTreeReader(fileType.getId());
       case FLOAT:
-        return new FloatTreeReader(columnId);
+        return new FloatTreeReader(fileType.getId());
       case SHORT:
-        return new ShortTreeReader(columnId);
+        return new ShortTreeReader(fileType.getId());
       case INT:
-        return new IntTreeReader(columnId);
+        return new IntTreeReader(fileType.getId());
       case LONG:
-        return new LongTreeReader(columnId, skipCorrupt);
+        return new LongTreeReader(fileType.getId(), skipCorrupt);
       case STRING:
-        return new StringTreeReader(columnId);
+        return new StringTreeReader(fileType.getId());
       case CHAR:
-        if (!type.hasMaximumLength()) {
-          throw new IllegalArgumentException("ORC char type has no length specified");
-        }
-        return new CharTreeReader(columnId, type.getMaximumLength());
+        return new CharTreeReader(fileType.getId(), readerType.getMaxLength());
       case VARCHAR:
-        if (!type.hasMaximumLength()) {
-          throw new IllegalArgumentException("ORC varchar type has no length specified");
-        }
-        return new VarcharTreeReader(columnId, type.getMaximumLength());
+        return new VarcharTreeReader(fileType.getId(), readerType.getMaxLength());
       case BINARY:
-        return new BinaryTreeReader(columnId);
+        return new BinaryTreeReader(fileType.getId());
       case TIMESTAMP:
-        return new TimestampTreeReader(columnId, skipCorrupt);
+        return new TimestampTreeReader(fileType.getId(), skipCorrupt);
       case DATE:
-        return new DateTreeReader(columnId);
+        return new DateTreeReader(fileType.getId());
       case DECIMAL:
-        int precision =
-            type.hasPrecision() ? type.getPrecision() : HiveDecimal.SYSTEM_DEFAULT_PRECISION;
-        int scale = type.hasScale() ? type.getScale() : HiveDecimal.SYSTEM_DEFAULT_SCALE;
-        return new DecimalTreeReader(columnId, precision, scale);
+        return new DecimalTreeReader(fileType.getId(), readerType.getPrecision(),
+            readerType.getScale());
       case STRUCT:
-        return new StructTreeReader(columnId, treeReaderSchema, included, skipCorrupt);
+        return new StructTreeReader(fileType.getId(), readerType,
+            evolution, included, skipCorrupt);
       case LIST:
-        return new ListTreeReader(columnId, treeReaderSchema, included, skipCorrupt);
+        return new ListTreeReader(fileType.getId(), readerType,
+            evolution, included, skipCorrupt);
       case MAP:
-        return new MapTreeReader(columnId, treeReaderSchema, included, skipCorrupt);
+        return new MapTreeReader(fileType.getId(), readerType, evolution,
+            included, skipCorrupt);
       case UNION:
-        return new UnionTreeReader(columnId, treeReaderSchema, included, skipCorrupt);
+        return new UnionTreeReader(fileType.getId(), readerType,
+            evolution, included, skipCorrupt);
       default:
         throw new IllegalArgumentException("Unsupported type " +
-            type.getKind());
+            readerType.getCategory());
     }
   }
 }
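
A minimal standalone sketch of the offsets/childCount bookkeeping that the new ListTreeReader.nextVector and MapTreeReader.nextVector perform above. Plain Java only; the arrays stand in for the ColumnVector fields of the same names, so this illustrates the loop rather than the actual Hive classes.

  public class OffsetsSketch {
    // Mirrors the loop in nextVector: each non-null row starts where the previous
    // one ended, and the running total says how many child values to read next.
    static long buildOffsets(long[] lengths, boolean[] isNull, long[] offsets, int batchSize) {
      long childCount = 0;
      for (int r = 0; r < batchSize; ++r) {
        if (isNull == null || !isNull[r]) {
          offsets[r] = childCount;
          childCount += lengths[r];
        }
      }
      return childCount;
    }

    public static void main(String[] args) {
      long[] lengths = {2, 0, 3};
      long[] offsets = new long[3];
      long children = buildOffsets(lengths, null, offsets, 3);
      System.out.println(children + " children, offsets " + java.util.Arrays.toString(offsets));
      // prints: 5 children, offsets [0, 2, 2]
    }
  }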

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
index 816b52d..e4d2e6e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/VectorizedOrcInputFormat.java
@@ -71,14 +71,29 @@ public class VectorizedOrcInputFormat extends FileInputFormat<NullWritable, Vect
         OrcInputFormat.raiseAcidTablesMustBeReadWithAcidReaderException(conf);
       }
 
+      rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
       /**
        * Do we have schema on read in the configuration variables?
        */
-      TypeDescription schema = OrcInputFormat.getDesiredRowTypeDescr(conf, /* isAcidRead */ false);
-
       List<OrcProto.Type> types = file.getTypes();
-      Reader.Options options = new Reader.Options();
-      options.schema(schema);
+      int dataColumns = rbCtx.getDataColumnCount();
+      TypeDescription schema =
+          OrcInputFormat.getDesiredRowTypeDescr(conf, false, dataColumns);
+      if (schema == null) {
+        schema = file.getSchema();
+        // Even if the user isn't doing schema evolution, cut the schema
+        // to the desired size.
+        if (schema.getCategory() == TypeDescription.Category.STRUCT &&
+            schema.getChildren().size() > dataColumns) {
+          schema = schema.clone();
+          List<TypeDescription> children = schema.getChildren();
+          for(int c = children.size() - 1; c >= dataColumns; --c) {
+            children.remove(c);
+          }
+        }
+      }
+      Reader.Options options = new Reader.Options().schema(schema);
+
       this.offset = fileSplit.getStart();
       this.length = fileSplit.getLength();
       options.range(offset, length);
@@ -87,8 +102,6 @@ public class VectorizedOrcInputFormat extends FileInputFormat<NullWritable, Vect
 
       this.reader = file.rowsOptions(options);
 
-      rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
-
       columnsToIncludeTruncated = rbCtx.getColumnsToIncludeTruncated(conf);
 
       int partitionColumnCount = rbCtx.getPartitionColumnCount();
@@ -103,9 +116,6 @@ public class VectorizedOrcInputFormat extends FileInputFormat<NullWritable, Vect
     @Override
     public boolean next(NullWritable key, VectorizedRowBatch value) throws IOException {
 
-      if (!reader.hasNext()) {
-        return false;
-      }
       try {
         // Check and update partition cols if necessary. Ideally, this should be done
         // in CreateValue as the partition is constant per split. But since Hive uses
@@ -118,7 +128,9 @@ public class VectorizedOrcInputFormat extends FileInputFormat<NullWritable, Vect
           }
           addPartitionCols = false;
         }
-        reader.nextBatch(value);
+        if (!reader.nextBatch(value)) {
+          return false;
+        }
       } catch (Exception e) {
         throw new RuntimeException(e);
       }
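
The contract change above is that the ORC record reader's nextBatch(...) now reports end-of-data through its return value, so the old hasNext() pre-check goes away. A hedged sketch of the resulting read loop; BatchReader is a made-up stand-in interface, not the actual Hive reader type.

  import java.io.IOException;

  public class NextBatchLoopSketch {
    // Stand-in for the ORC row reader used above; false means "no more rows".
    interface BatchReader {
      boolean nextBatch(Object batch) throws IOException;
    }

    static void drain(BatchReader reader, Object batch) throws IOException {
      // No separate hasNext() probe: the return value of nextBatch() ends the loop.
      while (reader.nextBatch(batch)) {
        // process(batch) would go here
      }
    }
  }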

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
index 70fe803..8e52907 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java
@@ -101,8 +101,6 @@ public class WriterImpl extends org.apache.orc.impl.WriterImpl implements Writer
     }
   }
 
-  private static final long NANOS_PER_MILLI = 1000000;
-
   /**
    * Set the value for a given column value within a batch.
    * @param rowId the row to set

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestTypeDescription.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestTypeDescription.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestTypeDescription.java
index 2a82092..96af65a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestTypeDescription.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestTypeDescription.java
@@ -51,11 +51,11 @@ public class TestTypeDescription {
             .addField("f4", TypeDescription.createDouble())
             .addField("f5", TypeDescription.createBoolean()))
         .addField("f6", TypeDescription.createChar().withMaxLength(100));
-    assertEquals("struct<f1:union<tinyint,decimal(20,10)>,f2:struct<f3:date,f4:double,f5:boolean>,f6:char(100)>",
+    assertEquals("struct<f1:uniontype<tinyint,decimal(20,10)>,f2:struct<f3:date,f4:double,f5:boolean>,f6:char(100)>",
         struct.toString());
     assertEquals(
         "{\"category\": \"struct\", \"id\": 0, \"max\": 8, \"fields\": [\n" +
-            "  \"f1\": {\"category\": \"union\", \"id\": 1, \"max\": 3, \"children\": [\n" +
+            "  \"f1\": {\"category\": \"uniontype\", \"id\": 1, \"max\": 3, \"children\": [\n" +
             "    {\"category\": \"tinyint\", \"id\": 2, \"max\": 2},\n" +
             "    {\"category\": \"decimal\", \"id\": 3, \"max\": 3, \"precision\": 20, \"scale\": 10}]},\n" +
             "  \"f2\": {\"category\": \"struct\", \"id\": 4, \"max\": 7, \"fields\": [\n" +


[51/58] [abbrv] hive git commit: HIVE-13465: Add ZK settings to MiniLlapCluster clusterSpecificConfiguration (Jason Dere, reviewed by Siddharth Seth)

Posted by jd...@apache.org.
HIVE-13465: Add ZK settings to MiniLlapCluster clusterSpecificConfiguration (Jason Dere, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cc2d0f03
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cc2d0f03
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cc2d0f03

Branch: refs/heads/llap
Commit: cc2d0f036739374234f9518a92d289c891ce047a
Parents: 2ba31f9
Author: Jason Dere <jd...@hortonworks.com>
Authored: Thu Apr 14 16:08:26 2016 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Thu Apr 14 16:08:26 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java     | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/cc2d0f03/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
----------------------------------------------------------------------
diff --git a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
index 9871702..610f266 100644
--- a/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
+++ b/llap-server/src/test/org/apache/hadoop/hive/llap/daemon/MiniLlapCluster.java
@@ -185,6 +185,10 @@ public class MiniLlapCluster extends AbstractService {
     conf.set(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + clusterNameTrimmed);
     conf.set(ConfVars.HIVE_ZOOKEEPER_QUORUM.varname, "localhost");
     conf.setInt(ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT.varname, miniZooKeeperCluster.getClientPort());
+    // Also add ZK settings to clusterSpecificConf to make sure these get picked up by whoever started this.
+    clusterSpecificConfiguration.set(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + clusterNameTrimmed);
+    clusterSpecificConfiguration.set(ConfVars.HIVE_ZOOKEEPER_QUORUM.varname, "localhost");
+    clusterSpecificConfiguration.setInt(ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT.varname, miniZooKeeperCluster.getClientPort());
   
     LOG.info("Initializing {} llap instances for MiniLlapCluster with name={}", numInstances, clusterNameTrimmed);
     for (int i = 0 ;i < numInstances ; i++) {
@@ -207,7 +211,6 @@ public class MiniLlapCluster extends AbstractService {
     // used by containers and LLAP
     clusterSpecificConfiguration
         .setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, false);
-    clusterSpecificConfiguration.set(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname, "@" + clusterNameTrimmed);
   }
 
   @Override
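
For context on why the ZK settings are mirrored into clusterSpecificConfiguration: callers of the mini cluster typically merge that object into their own conf, so the quorum, client port, and service-hosts values must live there as well. A hedged sketch of such a merge using only the generic Hadoop Configuration API (any mini-cluster accessor name is left out because it would be an assumption):

  import java.util.Map;
  import org.apache.hadoop.conf.Configuration;

  public class ConfMergeSketch {
    // Copy every entry of the cluster-specific conf into the caller's conf so that
    // values such as hive.zookeeper.quorum reach whoever started the cluster.
    public static void merge(Configuration clusterSpecific, Configuration target) {
      for (Map.Entry<String, String> e : clusterSpecific) {
        target.set(e.getKey(), e.getValue());
      }
    }
  }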


[31/58] [abbrv] hive git commit: HIVE-11484 : Fix ObjectInspector for Char and VarChar for null and string objects (Deepak Barr, reviewed by Amareshwari)

Posted by jd...@apache.org.
HIVE-11484 : Fix ObjectInspector for Char and VarChar for null and string objects (Deepak Barr, reviewed by Amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ddab69c4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ddab69c4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ddab69c4

Branch: refs/heads/llap
Commit: ddab69c495ce6c325d7e06cfb0c704546e0d56b9
Parents: 94c1974
Author: Deepak Barr <de...@gmail.com>
Authored: Wed Apr 13 09:50:24 2016 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Wed Apr 13 09:50:24 2016 +0530

----------------------------------------------------------------------
 .../hadoop/hive/common/type/HiveBaseChar.java   |  6 ++
 .../hadoop/hive/common/type/HiveVarchar.java    |  4 +-
 .../hive/common/type/TestHiveBaseChar.java      |  2 +
 .../primitive/JavaHiveCharObjectInspector.java  | 15 +++-
 .../JavaHiveVarcharObjectInspector.java         | 15 +++-
 .../PrimitiveObjectInspectorConverter.java      |  8 +-
 .../TestObjectInspectorConverters.java          | 95 ++++++++++++++++++++
 .../TestStandardObjectInspectors.java           | 14 ++-
 8 files changed, 148 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ddab69c4/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java b/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java
index 3514726..53684e7 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/HiveBaseChar.java
@@ -38,6 +38,9 @@ public abstract class HiveBaseChar {
   }
 
   public static String enforceMaxLength(String val, int maxLength) {
+    if (val == null) {
+      return null;
+    }
     String value = val;
 
     if (maxLength > 0) {
@@ -52,6 +55,9 @@ public abstract class HiveBaseChar {
   }
 
   public static String getPaddedValue(String val, int maxLength) {
+    if (val == null) {
+      return null;
+    }
     if (maxLength < 0) {
       return val;
     }
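
A tiny check of the null guards added above: both helpers now pass null through instead of raising a NullPointerException.

  import org.apache.hadoop.hive.common.type.HiveBaseChar;

  public class NullGuardSketch {
    public static void main(String[] args) {
      System.out.println(HiveBaseChar.enforceMaxLength(null, 10)); // null, not an NPE
      System.out.println(HiveBaseChar.getPaddedValue(null, 10));   // null, not an NPE
    }
  }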

http://git-wip-us.apache.org/repos/asf/hive/blob/ddab69c4/common/src/java/org/apache/hadoop/hive/common/type/HiveVarchar.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/type/HiveVarchar.java b/common/src/java/org/apache/hadoop/hive/common/type/HiveVarchar.java
index 969d474..09009eb 100644
--- a/common/src/java/org/apache/hadoop/hive/common/type/HiveVarchar.java
+++ b/common/src/java/org/apache/hadoop/hive/common/type/HiveVarchar.java
@@ -58,10 +58,10 @@ public class HiveVarchar extends HiveBaseChar
     return this.getValue().compareTo(rhs.getValue());
   }
 
-  public boolean equals(HiveVarchar rhs) {
+  public boolean equals(Object rhs) {
     if (rhs == this) {
       return true;
     }
-    return this.getValue().equals(rhs.getValue());
+    return this.getValue().equals(((HiveVarchar)rhs).getValue());
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ddab69c4/common/src/test/org/apache/hadoop/hive/common/type/TestHiveBaseChar.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveBaseChar.java b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveBaseChar.java
index 2848465..98ad74a 100644
--- a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveBaseChar.java
+++ b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveBaseChar.java
@@ -80,6 +80,7 @@ public class TestHiveBaseChar extends TestCase {
         assertEquals(strLen, enforcedString.codePointCount(0, enforcedString.length()));
       }
     }
+    assertNull(HiveBaseChar.enforceMaxLength(null, 0));
   }
 
   public void testGetPaddedValue() {
@@ -96,5 +97,6 @@ public class TestHiveBaseChar extends TestCase {
 
     assertEquals("abc       ", HiveBaseChar.getPaddedValue("abc", 10));
     assertEquals("abc       ", HiveBaseChar.getPaddedValue("abc ", 10));
+    assertNull(HiveBaseChar.getPaddedValue(null, 0));
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ddab69c4/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java
index f429709..3d89c92 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveCharObjectInspector.java
@@ -37,7 +37,12 @@ public class JavaHiveCharObjectInspector extends AbstractPrimitiveJavaObjectInsp
     if (o == null) {
       return null;
     }
-    HiveChar value = (HiveChar) o;
+    HiveChar value;
+    if (o instanceof String) {
+      value = new HiveChar((String) o, getMaxLength());
+    } else {
+      value = (HiveChar) o;
+    }
     if (BaseCharUtils.doesPrimitiveMatchTypeParams(value, (CharTypeInfo) typeInfo)) {
       return value;
     }
@@ -49,7 +54,13 @@ public class JavaHiveCharObjectInspector extends AbstractPrimitiveJavaObjectInsp
     if (o == null) {
       return null;
     }
-    return getWritableWithParams((HiveChar) o);
+    HiveChar var;
+    if (o instanceof String) {
+      var = new HiveChar((String) o, getMaxLength());
+    } else {
+      var = (HiveChar) o;
+    }
+    return getWritableWithParams(var);
   }
 
   private HiveChar getPrimitiveWithParams(HiveChar val) {

http://git-wip-us.apache.org/repos/asf/hive/blob/ddab69c4/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveVarcharObjectInspector.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveVarcharObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveVarcharObjectInspector.java
index a8e34ff..2320d2d 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveVarcharObjectInspector.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/JavaHiveVarcharObjectInspector.java
@@ -38,7 +38,12 @@ public class JavaHiveVarcharObjectInspector extends AbstractPrimitiveJavaObjectI
     if (o == null) {
       return null;
     }
-    HiveVarchar value = (HiveVarchar) o;
+    HiveVarchar value;
+    if (o instanceof String) {
+      value= new HiveVarchar((String)o, getMaxLength());
+    } else {
+      value = (HiveVarchar)o;
+    }
     if (BaseCharUtils.doesPrimitiveMatchTypeParams(value, (VarcharTypeInfo) typeInfo)) {
       return value;
     }
@@ -51,7 +56,13 @@ public class JavaHiveVarcharObjectInspector extends AbstractPrimitiveJavaObjectI
     if (o == null) {
       return null;
     }
-    return getWritableWithParams((HiveVarchar) o);
+    HiveVarchar var;
+    if (o instanceof String) {
+      var= new HiveVarchar((String)o, getMaxLength());
+    } else {
+      var = (HiveVarchar)o;
+    }
+    return getWritableWithParams(var);
   }
 
   @Override
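
The net effect of the two inspector changes above is that the java char/varchar object inspectors now accept a plain String and wrap it, instead of failing on the cast. A short usage sketch against the factory singleton (the String fits the default max length, so no truncation applies):

  import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaHiveVarcharObjectInspector;
  import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

  public class VarcharInspectorSketch {
    public static void main(String[] args) {
      JavaHiveVarcharObjectInspector oi =
          PrimitiveObjectInspectorFactory.javaHiveVarcharObjectInspector;
      Object v = oi.getPrimitiveJavaObject("hive"); // String in, HiveVarchar wrapping it out
      System.out.println(v);
    }
  }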

http://git-wip-us.apache.org/repos/asf/hive/blob/ddab69c4/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
index 5eb41d5..e08ad43 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/PrimitiveObjectInspectorConverter.java
@@ -514,7 +514,7 @@ public class PrimitiveObjectInspectorConverter {
 
     PrimitiveObjectInspector inputOI;
     SettableHiveVarcharObjectInspector outputOI;
-    HiveVarcharWritable hc;
+    Object hc;
 
     public HiveVarcharConverter(PrimitiveObjectInspector inputOI,
         SettableHiveVarcharObjectInspector outputOI) {
@@ -528,7 +528,7 @@ public class PrimitiveObjectInspectorConverter {
       //if (typeParams == null) {
       //  throw new RuntimeException("varchar type used without type params");
       //}
-      hc = new HiveVarcharWritable();
+      hc = outputOI.create(new HiveVarchar("",-1));
     }
 
     @Override
@@ -551,13 +551,13 @@ public class PrimitiveObjectInspectorConverter {
   public static class HiveCharConverter implements Converter {
     PrimitiveObjectInspector inputOI;
     SettableHiveCharObjectInspector outputOI;
-    HiveCharWritable hc;
+    Object hc;
 
     public HiveCharConverter(PrimitiveObjectInspector inputOI,
         SettableHiveCharObjectInspector outputOI) {
       this.inputOI = inputOI;
       this.outputOI = outputOI;
-      hc = new HiveCharWritable();
+      hc = outputOI.create(new HiveChar("",-1));
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/ddab69c4/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java b/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java
index dd18517..2e1bb22 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestObjectInspectorConverters.java
@@ -22,10 +22,15 @@ import java.util.List;
 
 import junit.framework.TestCase;
 
+import org.apache.hadoop.hive.common.type.HiveChar;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+import org.apache.hadoop.hive.serde2.io.HiveCharWritable;
+import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
 import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
@@ -116,6 +121,96 @@ public class TestObjectInspectorConverters extends TestCase {
           .convert(Integer.valueOf(1)));
       assertEquals("DoubleConverter", null, doubleConverter.convert(null));
 
+      // Char
+      Converter charConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaBooleanObjectInspector,
+        PrimitiveObjectInspectorFactory.javaHiveCharObjectInspector);
+      assertEquals("CharConverter", new HiveChar("TRUE", -1), charConverter
+        .convert(Boolean.valueOf(true)));
+      assertEquals("CharConverter", new HiveChar("FALSE", -1), charConverter
+        .convert(Boolean.valueOf(false)));
+
+      charConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaBooleanObjectInspector,
+        PrimitiveObjectInspectorFactory.writableHiveCharObjectInspector);
+      assertEquals("CharConverter", new HiveCharWritable(new HiveChar("TRUE", -1)), charConverter
+        .convert(Boolean.valueOf(true)));
+      assertEquals("CharConverter", new HiveCharWritable(new HiveChar("FALSE", -1)), charConverter
+        .convert(Boolean.valueOf(false)));
+
+      charConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaIntObjectInspector,
+        PrimitiveObjectInspectorFactory.javaHiveCharObjectInspector);
+      assertEquals("CharConverter", new HiveChar("0", -1), charConverter
+        .convert(Integer.valueOf(0)));
+      assertEquals("CharConverter", new HiveChar("1", -1), charConverter
+        .convert(Integer.valueOf(1)));
+
+      charConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaIntObjectInspector,
+        PrimitiveObjectInspectorFactory.writableHiveCharObjectInspector);
+      assertEquals("CharConverter", new HiveCharWritable(new HiveChar("0", -1)), charConverter
+        .convert(Integer.valueOf(0)));
+      assertEquals("CharConverter", new HiveCharWritable(new HiveChar("1", -1)), charConverter
+        .convert(Integer.valueOf(1)));
+
+      charConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaStringObjectInspector,
+        PrimitiveObjectInspectorFactory.javaHiveCharObjectInspector);
+      assertEquals("CharConverter", new HiveChar("hive", -1), charConverter
+        .convert(String.valueOf("hive")));
+
+      charConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaStringObjectInspector,
+        PrimitiveObjectInspectorFactory.writableHiveCharObjectInspector);
+      assertEquals("CharConverter", new HiveCharWritable(new HiveChar("hive", -1)), charConverter
+        .convert(String.valueOf("hive")));
+
+      // VarChar
+      Converter varcharConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaBooleanObjectInspector,
+        PrimitiveObjectInspectorFactory.javaHiveVarcharObjectInspector);
+      assertEquals("VarCharConverter", new HiveVarchar("TRUE", -1), varcharConverter
+        .convert(Boolean.valueOf(true)));
+      assertEquals("VarCharConverter", new HiveVarchar("FALSE", -1), varcharConverter
+        .convert(Boolean.valueOf(false)));
+
+      varcharConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaBooleanObjectInspector,
+        PrimitiveObjectInspectorFactory.writableHiveVarcharObjectInspector);
+      assertEquals("VarCharConverter", new HiveVarcharWritable(new HiveVarchar("TRUE", -1)), varcharConverter
+        .convert(Boolean.valueOf(true)));
+      assertEquals("VarCharConverter", new HiveVarcharWritable(new HiveVarchar("FALSE", -1)), varcharConverter
+        .convert(Boolean.valueOf(false)));
+
+      varcharConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaIntObjectInspector,
+        PrimitiveObjectInspectorFactory.javaHiveVarcharObjectInspector);
+      assertEquals("VarCharConverter", new HiveVarchar("0", -1), varcharConverter
+        .convert(Integer.valueOf(0)));
+      assertEquals("VarCharConverter", new HiveVarchar("1", -1), varcharConverter
+        .convert(Integer.valueOf(1)));
+
+      varcharConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaIntObjectInspector,
+        PrimitiveObjectInspectorFactory.writableHiveVarcharObjectInspector);
+      assertEquals("VarCharConverter", new HiveVarcharWritable(new HiveVarchar("0", -1)), varcharConverter
+        .convert(Integer.valueOf(0)));
+      assertEquals("VarCharConverter", new HiveVarcharWritable(new HiveVarchar("1", -1)), varcharConverter
+        .convert(Integer.valueOf(1)));
+
+      varcharConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaStringObjectInspector,
+        PrimitiveObjectInspectorFactory.javaHiveVarcharObjectInspector);
+      assertEquals("VarCharConverter", new HiveVarchar("hive", -1), varcharConverter
+        .convert(String.valueOf("hive")));
+
+      varcharConverter = ObjectInspectorConverters.getConverter(
+        PrimitiveObjectInspectorFactory.javaStringObjectInspector,
+        PrimitiveObjectInspectorFactory.writableHiveVarcharObjectInspector);
+      assertEquals("VarCharConverter", new HiveVarcharWritable(new HiveVarchar("hive", -1)), varcharConverter
+        .convert(String.valueOf("hive")));
+
       // Text
       Converter textConverter = ObjectInspectorConverters.getConverter(
           PrimitiveObjectInspectorFactory.javaIntObjectInspector,

http://git-wip-us.apache.org/repos/asf/hive/blob/ddab69c4/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestStandardObjectInspectors.java
----------------------------------------------------------------------
diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestStandardObjectInspectors.java b/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestStandardObjectInspectors.java
index 7d87666..29906c9 100644
--- a/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestStandardObjectInspectors.java
+++ b/serde/src/test/org/apache/hadoop/hive/serde2/objectinspector/TestStandardObjectInspectors.java
@@ -24,9 +24,13 @@ import java.util.List;
 
 import junit.framework.TestCase;
 
+import org.apache.hadoop.hive.common.type.HiveChar;
+import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.serde2.SerDeUtils;
 import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
+import org.apache.hadoop.hive.serde2.io.HiveCharWritable;
+import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
@@ -118,8 +122,14 @@ public class TestStandardObjectInspectors extends TestCase {
             .getClass());
       }
 
-      assertEquals(PrimitiveObjectInspectorUtils
+      if (javaClass == HiveVarchar.class) {
+        assertEquals("varchar(65535)", oi1.getTypeName());
+      } else if (javaClass == HiveChar.class) {
+        assertEquals("char(255)", oi1.getTypeName());
+      } else {
+        assertEquals(PrimitiveObjectInspectorUtils
           .getTypeNameFromPrimitiveJava(javaClass), oi1.getTypeName());
+      }
     } catch (Throwable e) {
       e.printStackTrace();
       throw e;
@@ -143,6 +153,8 @@ public class TestStandardObjectInspectors extends TestCase {
       doTestJavaPrimitiveObjectInspector(DoubleWritable.class, Double.class,
           (double) 1);
       doTestJavaPrimitiveObjectInspector(Text.class, String.class, "a");
+      doTestJavaPrimitiveObjectInspector(HiveVarcharWritable.class, HiveVarchar.class, "a");
+      doTestJavaPrimitiveObjectInspector(HiveCharWritable.class, HiveChar.class, "a");
       doTestJavaPrimitiveObjectInspector(BytesWritable.class, byte[].class, new byte[]{'3'});
 
     } catch (Throwable e) {


[37/58] [abbrv] hive git commit: HIVE-13486: Cast the column type for column masking (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13486: Cast the column type for column masking (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/529580f8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/529580f8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/529580f8

Branch: refs/heads/llap
Commit: 529580f88e6e9c694a705028e08fd3ee59fd260a
Parents: 7049f49
Author: Pengcheng Xiong <px...@apache.org>
Authored: Wed Apr 13 13:12:50 2016 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Wed Apr 13 13:12:50 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/parse/MaskAndFilterInfo.java    |  7 ++++++-
 .../org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java | 10 ++++++----
 .../java/org/apache/hadoop/hive/ql/parse/TableMask.java   |  9 ++++++++-
 3 files changed, 20 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/529580f8/ql/src/java/org/apache/hadoop/hive/ql/parse/MaskAndFilterInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MaskAndFilterInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaskAndFilterInfo.java
index 1678d2c..f5a12a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MaskAndFilterInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaskAndFilterInfo.java
@@ -18,13 +18,18 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
+import java.util.List;
+
 public class MaskAndFilterInfo {
+  List<String> colTypes;
   String additionalTabInfo;
   String alias;
   ASTNode astNode;
 
-  public MaskAndFilterInfo(String additionalTabInfo, String alias, ASTNode astNode) {
+  public MaskAndFilterInfo(List<String> colTypes, String additionalTabInfo, String alias,
+      ASTNode astNode) {
     super();
+    this.colTypes = colTypes;
     this.additionalTabInfo = additionalTabInfo;
     this.alias = alias;
     this.astNode = astNode;

http://git-wip-us.apache.org/repos/asf/hive/blob/529580f8/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 9b565c5..d3e7040 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10408,13 +10408,15 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           throw new SemanticException("Table " + tabIdName + " is not found.");
         }
 
-        List<String> columns = new ArrayList<>();
+        List<String> colNames = new ArrayList<>();
+        List<String> colTypes = new ArrayList<>();
         for (FieldSchema col : table.getAllCols()) {
-          columns.add(col.getName());
+          colNames.add(col.getName());
+          colTypes.add(col.getType());
         }
         
-        basicInfos.put(new HivePrivilegeObject(table.getDbName(), table.getTableName(), columns),
-            new MaskAndFilterInfo(additionalTabInfo.toString(), alias, astNode));
+        basicInfos.put(new HivePrivilegeObject(table.getDbName(), table.getTableName(), colNames),
+            new MaskAndFilterInfo(colTypes, additionalTabInfo.toString(), alias, astNode));
       }
       if (astNode.getChildCount() > 0 && !ignoredTokens.contains(astNode.getToken().getType())) {
         for (Node child : astNode.getChildren()) {

http://git-wip-us.apache.org/repos/asf/hive/blob/529580f8/ql/src/java/org/apache/hadoop/hive/ql/parse/TableMask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableMask.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableMask.java
index f030da2..f3c7262 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableMask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableMask.java
@@ -89,6 +89,7 @@ public class TableMask {
         throw new SemanticException("Expect " + privObject.getColumns().size() + " columns in "
             + privObject.getObjectName() + ", but only find " + exprs.size());
       }
+      List<String> colTypes = maskAndFilterInfo.colTypes;
       for (int index = 0; index < exprs.size(); index++) {
         String expr = exprs.get(index);
         if (expr == null) {
@@ -100,7 +101,13 @@ public class TableMask {
         } else {
           firstOne = false;
         }
-        sb.append(expr + " AS " + privObject.getColumns().get(index));
+        String colName = privObject.getColumns().get(index);
+        if (!expr.equals(colName)) {
+          // CAST(expr AS COLTYPE) AS COLNAME
+          sb.append("CAST(" + expr + " AS " + colTypes.get(index) + ") AS " + colName);
+        } else {
+          sb.append(expr);
+        }
       }
     } else {
       for (int index = 0; index < privObject.getColumns().size(); index++) {
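
Worked example of the projection string the masking loop above now emits. The column, its type, and the masking expression are made-up here; only the CAST ... AS pattern comes from the change.

  public class MaskProjectionSketch {
    public static void main(String[] args) {
      String expr = "mask_last_4(ssn)";   // hypothetical masking expression from the policy
      String colType = "varchar(11)";     // hypothetical column type
      String colName = "ssn";
      String projection = expr.equals(colName)
          ? expr                          // unmasked columns pass through unchanged
          : "CAST(" + expr + " AS " + colType + ") AS " + colName;
      System.out.println(projection);
      // prints: CAST(mask_last_4(ssn) AS varchar(11)) AS ssn
    }
  }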


[19/58] [abbrv] hive git commit: HIVE-13149: Remove some unnecessary HMS connections from HS2 (Reviewed by Szehon Ho, Chaoyu Tang)

Posted by jd...@apache.org.
HIVE-13149: Remove some unnecessary HMS connections from HS2 (Reviewed by Szehon Ho, Chaoyu Tang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/37e6e1bf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/37e6e1bf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/37e6e1bf

Branch: refs/heads/llap
Commit: 37e6e1bf56d7d2fd557730380b147c745fc051ce
Parents: 1cb4ce8
Author: Aihua Xu <ai...@apache.org>
Authored: Fri Mar 18 10:27:49 2016 -0400
Committer: Aihua Xu <ai...@apache.org>
Committed: Mon Apr 11 14:58:59 2016 -0400

----------------------------------------------------------------------
 .../hadoop/hive/metastore/TestMetastoreVersion.java   |  7 ++++---
 .../metastore/hbase/TestHBaseMetastoreMetrics.java    |  4 +---
 .../org/apache/hadoop/hive/hbase/HBaseQTestUtil.java  | 10 +++++++++-
 .../org/apache/hadoop/hive/hbase/HBaseTestSetup.java  |  3 ---
 .../java/org/apache/hadoop/hive/ql/QTestUtil.java     | 14 ++++++++++----
 .../hadoop/hive/metastore/HiveMetaStoreClient.java    | 10 ++++++----
 .../apache/hadoop/hive/ql/session/SessionState.java   |  8 --------
 7 files changed, 30 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/37e6e1bf/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
index 53f0d0e..5ceb3d2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hive.metastore;
 
 import java.io.File;
 import java.lang.reflect.Field;
-import java.util.Random;
 
 import junit.framework.TestCase;
 
@@ -32,6 +31,7 @@ import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
@@ -96,8 +96,9 @@ public class TestMetastoreVersion extends TestCase {
     // session creation should fail since the schema didn't get created
     try {
       SessionState.start(new CliSessionState(hiveConf));
-      fail("Expected exception");
-    } catch (RuntimeException re) {
+      Hive.get(hiveConf).getMSC();
+      fail("An exception is expected since schema is not created.");
+    } catch (Exception re) {
       LOG.info("Exception in testVersionRestriction: " + re, re);
       String msg = HiveStringUtils.stringifyException(re);
       assertTrue("Expected 'Version information not found in metastore' in: " + msg, msg

http://git-wip-us.apache.org/repos/asf/hive/blob/37e6e1bf/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java
index 3ed88f2..aefafe0 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java
@@ -41,8 +41,6 @@ import java.io.IOException;
  */
 public class TestHBaseMetastoreMetrics extends HBaseIntegrationTests {
 
-  private CodahaleMetrics metrics;
-
   @BeforeClass
   public static void startup() throws Exception {
     HBaseIntegrationTests.startMiniCluster();
@@ -66,7 +64,6 @@ public class TestHBaseMetastoreMetrics extends HBaseIntegrationTests {
     conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name());
     SessionState.start(new CliSessionState(conf));
     driver = new Driver(conf);
-    metrics = (CodahaleMetrics) MetricsFactory.getInstance();
   }
 
   @Test
@@ -107,6 +104,7 @@ public class TestHBaseMetastoreMetrics extends HBaseIntegrationTests {
     driver.run("use default");
     driver.run("drop database tempdb cascade");
 
+    CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance();
     String json = metrics.dumpJson();
     MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_DATABASES, 2);
     MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_TABLES, 7);

http://git-wip-us.apache.org/repos/asf/hive/blob/37e6e1bf/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
index 3ff5742..70c0b13 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
@@ -39,12 +39,14 @@ public class HBaseQTestUtil extends QTestUtil {
   /** A handle to this harness's cluster */
   private final HConnection conn;
 
+  private HBaseTestSetup setup;
+
   public HBaseQTestUtil(
     String outDir, String logDir, MiniClusterType miniMr, HBaseTestSetup setup,
     String initScript, String cleanupScript)
     throws Exception {
-
     super(outDir, logDir, miniMr, null, "0.20", initScript, cleanupScript, false, false);
+    this.setup = setup;
     setup.preTest(conf);
     this.conn = setup.getConnection();
     super.init();
@@ -69,6 +71,12 @@ public class HBaseQTestUtil extends QTestUtil {
   }
 
   @Override
+  protected void initConfFromSetup() throws Exception {
+    super.initConfFromSetup();
+    setup.preTest(conf);
+  }
+
+  @Override
   public void createSources(String tname) throws Exception {
     super.createSources(tname);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/37e6e1bf/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
index e6383dc..cee7158 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
@@ -22,9 +22,6 @@ import java.io.IOException;
 import java.net.ServerSocket;
 import java.util.Arrays;
 
-import junit.extensions.TestSetup;
-import junit.framework.Test;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;

http://git-wip-us.apache.org/repos/asf/hive/blob/37e6e1bf/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 8473436..2f109ab 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -510,6 +510,7 @@ public class QTestUtil {
       dfs.shutdown();
       dfs = null;
     }
+    Hive.closeCurrent();
   }
 
   public String readEntireFileIntoString(File queryFile) throws IOException {
@@ -731,8 +732,9 @@ public class QTestUtil {
       return;
     }
 
-    db.getConf().set("hive.metastore.filter.hook",
+    conf.set("hive.metastore.filter.hook",
         "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl");
+    db = Hive.get(conf);
     // Delete any tables other than the source tables
     // and any databases other than the default database.
     for (String dbName : db.getAllDatabases()) {
@@ -800,16 +802,20 @@ public class QTestUtil {
       return;
     }
 
-    clearTablesCreatedDuringTests();
-    clearKeysCreatedInTests();
-
     // allocate and initialize a new conf since a test can
     // modify conf by using 'set' commands
     conf = new HiveConf(Driver.class);
     initConf();
+    initConfFromSetup();
+
     // renew the metastore since the cluster type is unencrypted
     db = Hive.get(conf);  // propagate new conf to meta store
 
+    clearTablesCreatedDuringTests();
+    clearKeysCreatedInTests();
+  }
+
+  protected void initConfFromSetup() throws Exception {
     setup.preTest(conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/37e6e1bf/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index cdd12ab..64a26ac 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -178,7 +178,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
   private boolean isConnected = false;
   private URI metastoreUris[];
   private final HiveMetaHookLoader hookLoader;
-  protected final HiveConf conf;
+  protected final HiveConf conf;  // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client.
   protected boolean fastpath = false;
   private String tokenStrForm;
   private final boolean localMetaStore;
@@ -205,8 +205,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
     this.hookLoader = hookLoader;
     if (conf == null) {
       conf = new HiveConf(HiveMetaStoreClient.class);
+      this.conf = conf;
+    } else {
+      this.conf = new HiveConf(conf);
     }
-    this.conf = conf;
     filterHook = loadFilterHooks();
     fileMetadataBatchSize = HiveConf.getIntVar(
         conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX);
@@ -221,10 +223,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
       // instantiate the metastore server handler directly instead of connecting
       // through the network
       if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
-        client = new HiveMetaStore.HMSHandler("hive client", conf, true);
+        client = new HiveMetaStore.HMSHandler("hive client", this.conf, true);
         fastpath = true;
       } else {
-        client = HiveMetaStore.newRetryingHMSHandler("hive client", conf, true);
+        client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
       }
       isConnected = true;
       snapshotActiveConf();

http://git-wip-us.apache.org/repos/asf/hive/blob/37e6e1bf/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 23b8a96..f7a0f31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -546,10 +546,6 @@ public class SessionState {
     // Get the following out of the way when you start the session these take a
     // while and should be done when we start up.
     try {
-      // Hive object instance should be created with a copy of the conf object. If the conf is
-      // shared with SessionState, other parts of the code might update the config, but
-      // Hive.get(HiveConf) would not recognize the case when it needs refreshing
-      Hive.get(new HiveConf(startSs.conf)).getMSC();
       UserGroupInformation sessionUGI = Utils.getUGI();
       FileSystem.get(startSs.conf);
 
@@ -575,10 +571,6 @@ public class SessionState {
       }
     } catch (RuntimeException e) {
       throw e;
-    } catch (Hive.SchemaException e) {
-      RuntimeException ex = new RuntimeException(e.getMessage());
-      ex.setStackTrace(new StackTraceElement[0]);
-      throw ex;
     } catch (Exception e) {
       // Catch-all due to some exec time dependencies on session state
       // that would cause ClassNoFoundException otherwise
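
The practical consequence of dropping the eager Hive.get(...).getMSC() call above: starting a SessionState no longer opens a metastore connection, and any schema-version failure now surfaces on first metastore use, as the updated TestMetastoreVersion shows. A hedged sketch of that order of events:

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.ql.metadata.Hive;
  import org.apache.hadoop.hive.ql.session.SessionState;

  public class LazyMetastoreSketch {
    public static void main(String[] args) throws Exception {
      HiveConf conf = new HiveConf();
      SessionState.start(new SessionState(conf)); // no HMS round trip here any more
      Hive.get(conf).getMSC();                    // first real metastore call (and failure point)
    }
  }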


[22/58] [abbrv] hive git commit: HIVE-13429: Tool to remove dangling scratch dir (Daniel Dai, reviewed by Thejas Nair)

Posted by jd...@apache.org.
HIVE-13429: Tool to remove dangling scratch dir (Daniel Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d3532169
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d3532169
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d3532169

Branch: refs/heads/llap
Commit: d3532169a29537d02c6e4a6768d0db297ea299d6
Parents: 672419d
Author: Daniel Dai <da...@hortonworks.com>
Authored: Mon Apr 11 15:14:51 2016 -0700
Committer: Daniel Dai <da...@hortonworks.com>
Committed: Mon Apr 11 15:14:51 2016 -0700

----------------------------------------------------------------------
 bin/ext/cleardanglingscratchdir.cmd             |  35 ++++
 bin/ext/cleardanglingscratchdir.sh              |  28 +++
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../ql/session/TestClearDanglingScratchDir.java | 161 +++++++++++++++++
 .../ql/session/ClearDanglingScratchDir.java     | 176 +++++++++++++++++++
 .../hadoop/hive/ql/session/SessionState.java    |  33 +++-
 6 files changed, 431 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d3532169/bin/ext/cleardanglingscratchdir.cmd
----------------------------------------------------------------------
diff --git a/bin/ext/cleardanglingscratchdir.cmd b/bin/ext/cleardanglingscratchdir.cmd
new file mode 100644
index 0000000..31104af
--- /dev/null
+++ b/bin/ext/cleardanglingscratchdir.cmd
@@ -0,0 +1,35 @@
+@echo off
+@rem Licensed to the Apache Software Foundation (ASF) under one or more
+@rem contributor license agreements.  See the NOTICE file distributed with
+@rem this work for additional information regarding copyright ownership.
+@rem The ASF licenses this file to You under the Apache License, Version 2.0
+@rem (the "License"); you may not use this file except in compliance with
+@rem the License.  You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+set CLASS=org.apache.hadoop.hive.ql.session.ClearDanglingScratchDir
+set HIVE_OPTS=
+set HADOOP_CLASSPATH=
+
+pushd %HIVE_LIB%
+for /f %%a IN ('dir /b hive-exec-*.jar') do (
+        set JAR=%HIVE_LIB%\%%a
+)
+popd
+
+if [%1]==[cleardanglingscratchdir_help] goto :cleardanglingscratchdir_help
+
+:cleardanglingscratchdir
+        call %HIVE_BIN_PATH%\ext\util\execHiveCmd.cmd %CLASS%
+goto :EOF
+
+:cleardanglingscratchdir_help
+        echo "usage hive --service cleardanglingscratchdir"
+goto :EOF

http://git-wip-us.apache.org/repos/asf/hive/blob/d3532169/bin/ext/cleardanglingscratchdir.sh
----------------------------------------------------------------------
diff --git a/bin/ext/cleardanglingscratchdir.sh b/bin/ext/cleardanglingscratchdir.sh
new file mode 100644
index 0000000..dcc44e3
--- /dev/null
+++ b/bin/ext/cleardanglingscratchdir.sh
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+THISSERVICE=cleardanglingscratchdir
+export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
+
+cleardanglingscratchdir () {
+  CLASS=org.apache.hadoop.hive.ql.session.ClearDanglingScratchDir
+  HIVE_OPTS=''
+  execHiveCmd $CLASS "$@"
+}
+
+cleardanglingscratchdir_help () {
+  echo ""
+  echo "usage ./hive --service cleardanglingscratchdir"
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/d3532169/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fabb8ab..1702eb1 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1974,6 +1974,8 @@ public class HiveConf extends Configuration {
         "Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger"),
     HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false,
         "To cleanup the Hive scratchdir when starting the Hive Server"),
+    HIVE_SCRATCH_DIR_LOCK("hive.scratchdir.lock", false,
+        "To hold a lock file in scratchdir to prevent it from being removed by cleardanglingscratchdir"),
     HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false,
         "Where to insert into multilevel directories like\n" +
         "\"insert directory '/HIVEFT25686/chinna/' from table\""),

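The new flag defaults to false. A minimal sketch of enabling it programmatically before a
session starts, assuming the Hive and Hadoop jars are on the classpath (the class name
ScratchDirLockExample is made up for illustration; the conf/session calls mirror the
integration test further down in this patch):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.session.SessionState;

    public class ScratchDirLockExample {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        // hive.scratchdir.lock is the setting added above; the test below toggles it the same way
        conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true");
        // Starting a session now also creates an inuse.lck file under the HDFS session dir
        // and keeps it open for write until the session is closed
        SessionState ss = SessionState.start(conf);
        ss.close();
      }
    }

The same property can also be set in hive-site.xml before HiveCli/HiveServer2 starts.
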
http://git-wip-us.apache.org/repos/asf/hive/blob/d3532169/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
new file mode 100644
index 0000000..1007113
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.session;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.nio.channels.FileChannel;
+import java.util.UUID;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.LoggerFactory;
+
+public class TestClearDanglingScratchDir {
+  private static MiniDFSCluster m_dfs = null;
+  private static HiveConf conf;
+  private static Path scratchDir;
+  private ByteArrayOutputStream stdout;
+  private PrintStream origStdoutPs;
+  private static File logFile;
+
+  @BeforeClass
+  static public void oneTimeSetup() throws Exception {
+    logFile = File.createTempFile("log", "");
+    File log4jConfig = File.createTempFile("config", ".properties");
+    log4jConfig.deleteOnExit();
+    PrintWriter pw = new PrintWriter(log4jConfig);
+    pw.println("appenders = console, file");
+    pw.println("appender.console.type = Console");
+    pw.println("appender.console.name = STDOUT");
+    pw.println("appender.console.layout.type = PatternLayout");
+    pw.println("appender.console.layout.pattern = %t %-5p %c{2} - %m%n");
+    pw.println("appender.file.type = File");
+    pw.println("appender.file.name = LOGFILE");
+    pw.println("appender.file.fileName = " + logFile.getAbsolutePath());
+    pw.println("appender.file.layout.type = PatternLayout");
+    pw.println("appender.file.layout.pattern = %t %-5p %c{2} - %m%n");
+    pw.println("rootLogger.level = debug");
+    pw.println("rootLogger.appenderRefs = stdout");
+    pw.println("rootLogger.appenderRef.stdout.ref = STDOUT");
+    pw.println("loggers = file");
+    pw.println("logger.file.name = SessionState");
+    pw.println("logger.file.level = debug");
+    pw.println("logger.file.appenderRefs = file");
+    pw.println("logger.file.appenderRef.file.ref = LOGFILE");
+    pw.close();
+    System.setProperty("log4j.configurationFile", log4jConfig.getAbsolutePath());
+
+    m_dfs = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).format(true).build();
+    conf = new HiveConf();
+    conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true");
+    conf.set(HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL.toString(), "true");
+    LoggerFactory.getLogger("SessionState");
+    conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE,
+        new Path(System.getProperty("test.tmp.dir"), "warehouse").toString());
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
+        m_dfs.getFileSystem().getUri().toString());
+
+    scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR));
+    m_dfs.getFileSystem().mkdirs(scratchDir);
+    m_dfs.getFileSystem().setPermission(scratchDir, new FsPermission("777"));
+  }
+
+  @AfterClass
+  static public void shutdown() throws Exception {
+    m_dfs.shutdown();
+  }
+
+  public void redirectOutput() throws IOException {
+    stdout = new ByteArrayOutputStream();
+    PrintStream psStdout = new PrintStream(stdout);
+    origStdoutPs = System.out;
+    System.setOut(psStdout);
+
+    FileOutputStream fos = new FileOutputStream(logFile, true);
+    FileChannel outChan = fos.getChannel();
+    outChan.truncate(0);
+    outChan.close();
+    fos.close();
+  }
+
+  public void rollbackOutput() {
+    System.setOut(origStdoutPs);
+  }
+
+  @Test
+  public void testClearDanglingScratchDir() throws Exception {
+
+    // No scratch dir initially
+    redirectOutput();
+    ClearDanglingScratchDir.main(new String[]{"-s",
+        m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
+    rollbackOutput();
+    Assert.assertTrue(FileUtils.readFileToString(logFile).contains("Cannot find any scratch directory to clear"));
+
+    // Create scratch dir without lock files
+    m_dfs.getFileSystem().mkdirs(new Path(new Path(scratchDir, "dummy"), UUID.randomUUID().toString()));
+    redirectOutput();
+    ClearDanglingScratchDir.main(new String[]{"-s",
+        m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
+    rollbackOutput();
+    Assert.assertEquals(StringUtils.countMatches(FileUtils.readFileToString(logFile),
+        "since it does not contain " + SessionState.LOCK_FILE_NAME), 1);
+    Assert.assertTrue(FileUtils.readFileToString(logFile).contains("Cannot find any scratch directory to clear"));
+
+    // One live session
+    SessionState ss = SessionState.start(conf);
+    redirectOutput();
+    ClearDanglingScratchDir.main(new String[]{"-s",
+        m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
+    rollbackOutput();
+    Assert.assertEquals(StringUtils.countMatches(FileUtils.readFileToString(logFile), "is being used by live process"), 1);
+
+    // One dead session with dry-run
+    ss.releaseSessionLockFile();
+    redirectOutput();
+    ClearDanglingScratchDir.main(new String[]{"-r", "-s",
+        m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
+    rollbackOutput();
+    // Find one session dir to remove
+    Assert.assertFalse(stdout.toString().isEmpty());
+
+    // Remove the dead session dir
+    redirectOutput();
+    ClearDanglingScratchDir.main(new String[]{"-s",
+        m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
+    rollbackOutput();
+    Assert.assertTrue(FileUtils.readFileToString(logFile).contains("Removing 1 scratch directories"));
+    Assert.assertEquals(StringUtils.countMatches(FileUtils.readFileToString(logFile), "removed"), 1);
+    ss.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/d3532169/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java b/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java
new file mode 100644
index 0000000..8543768
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.session;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
+
+/**
+ * A tool to remove dangling scratch directories. A scratch directory could be left behind
+ * in some cases, such as when the VM restarts and leaves no chance for Hive to run its shutdown hook.
+ * The tool tests whether a scratch directory is in use and, if not, removes it.
+ * We rely on the HDFS write lock to detect if a scratch directory is in use:
+ * 1. An HDFS client opens the HDFS file ($scratchdir/inuse.lck) for write and only closes
+ *    it at the time the session is closed
+ * 2. cleardanglingscratchdir can try to open $scratchdir/inuse.lck for write. If the
+ *    corresponding HiveCli/HiveServer2 is still running, we will get an exception.
+ *    Otherwise, we know the session is dead
+ * 3. If the HiveCli/HiveServer2 dies without closing the HDFS file, the NN will reclaim the
+ *    lease after 10 min, i.e., the HDFS file held by the dead HiveCli/HiveServer2 becomes writable
+ *    again after 10 min. Once it becomes writable, cleardanglingscratchdir will be able to
+ *    remove it
+ */
+public class ClearDanglingScratchDir {
+
+  public static void main(String[] args) throws Exception {
+    Options opts = createOptions();
+    CommandLine cli = new GnuParser().parse(opts, args);
+
+    if (cli.hasOption('h')) {
+      HelpFormatter formatter = new HelpFormatter();
+      formatter.printHelp("cleardanglingscratchdir"
+          + " (clear scratch dir left behind by dead HiveCli or HiveServer2)", opts);
+      return;
+    }
+
+    boolean dryRun = false;
+    boolean verbose = false;
+
+    if (cli.hasOption("r")) {
+      dryRun = true;
+    }
+
+    if (cli.hasOption("v")) {
+      verbose = true;
+    }
+
+    HiveConf conf = new HiveConf();
+
+    Path rootHDFSDirPath;
+    if (cli.hasOption("s")) {
+      rootHDFSDirPath = new Path(cli.getOptionValue("s"));
+    } else {
+      rootHDFSDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR));
+    }
+
+    FileSystem fs = FileSystem.get(rootHDFSDirPath.toUri(), conf);
+    FileStatus[] userHDFSDirList = fs.listStatus(rootHDFSDirPath);
+
+    List<Path> scratchDirToRemove = new ArrayList<Path>();
+    for (FileStatus userHDFSDir : userHDFSDirList) {
+      FileStatus[] scratchDirList = fs.listStatus(userHDFSDir.getPath());
+      for (FileStatus scratchDir : scratchDirList) {
+        Path lockFilePath = new Path(scratchDir.getPath(), SessionState.LOCK_FILE_NAME);
+        if (!fs.exists(lockFilePath)) {
+          String message = "Skipping " + scratchDir.getPath() + " since it does not contain " +
+              SessionState.LOCK_FILE_NAME;
+          if (verbose) {
+            SessionState.getConsole().printInfo(message);
+          } else {
+            SessionState.getConsole().logInfo(message);
+          }
+          continue;
+        }
+        try {
+          IOUtils.closeStream(fs.append(lockFilePath));
+          scratchDirToRemove.add(scratchDir.getPath());
+        } catch (RemoteException e) {
+          // RemoteException with AlreadyBeingCreatedException will be thrown
+          // if the file is currently held by a writer
+          if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
+            // Cannot open the lock file for writing, must be held by a live process
+            String message = scratchDir.getPath() + " is being used by live process";
+            if (verbose) {
+              SessionState.getConsole().printInfo(message);
+            } else {
+              SessionState.getConsole().logInfo(message);
+            }
+          } else {
+            throw e;
+          }
+        }
+      }
+    }
+
+    if (scratchDirToRemove.size()==0) {
+      SessionState.getConsole().printInfo("Cannot find any scratch directory to clear");
+      return;
+    }
+    SessionState.getConsole().printInfo("Removing " + scratchDirToRemove.size() + " scratch directories");
+    for (Path scratchDir : scratchDirToRemove) {
+      if (dryRun) {
+        System.out.println(scratchDir);
+      } else {
+        boolean succ = fs.delete(scratchDir, true);
+        if (!succ) {
+          SessionState.getConsole().printInfo("Cannot remove " + scratchDir);
+        } else {
+          String message = scratchDir + " removed";
+          if (verbose) {
+            SessionState.getConsole().printInfo(message);
+          } else {
+            SessionState.getConsole().logInfo(message);
+          }
+        }
+      }
+    }
+  }
+
+  static Options createOptions() {
+    Options result = new Options();
+
+    // add -r and --dry-run to generate list only
+    result.addOption(OptionBuilder
+        .withLongOpt("dry-run")
+        .withDescription("Generate a list of dangling scratch dirs, printed on the console")
+        .create('r'));
+
+    // add -s and --scratchdir to specify a non-default scratch dir
+    result.addOption(OptionBuilder
+        .withLongOpt("scratchdir")
+        .withDescription("Specify a non-default location of the scratch dir")
+        .hasArg()
+        .create('s'));
+
+    // add -v and --verbose to print verbose message
+    result.addOption(OptionBuilder
+        .withLongOpt("verbose")
+        .withDescription("Print verbose message")
+        .create('v'));
+
+    result.addOption(OptionBuilder
+        .withLongOpt("help")
+        .withDescription("print help message")
+        .create('h'));
+
+    return result;
+  }
+}
\ No newline at end of file
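
The liveness check described in the javadoc above reduces to a single HDFS append attempt.
A stripped-down sketch of that probe (ScratchDirProbe is a hypothetical helper, not an API
the tool exposes; the append/exception handling is the same as in main() above):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.ipc.RemoteException;

    class ScratchDirProbe {
      /** Returns true when no live HiveCli/HiveServer2 still holds the lease on inuse.lck. */
      static boolean isDangling(FileSystem fs, Path lockFilePath) throws Exception {
        try {
          // Appending fails while another writer still holds the HDFS lease on the file
          IOUtils.closeStream(fs.append(lockFilePath));
          return true;   // lease is free, so the owning session is gone
        } catch (RemoteException e) {
          if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
            return false; // held by a live process, or the lease has not been reclaimed yet
          }
          throw e;
        }
      }
    }

Note that a session that died very recently can still look alive until the NameNode reclaims
the lease (about 10 minutes, as the class comment explains).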

http://git-wip-us.apache.org/repos/asf/hive/blob/d3532169/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index f7a0f31..ca18247 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -23,6 +23,8 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.PrintStream;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLClassLoader;
@@ -46,6 +48,7 @@ import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -105,6 +108,7 @@ public class SessionState {
   private static final String LOCAL_SESSION_PATH_KEY = "_hive.local.session.path";
   private static final String HDFS_SESSION_PATH_KEY = "_hive.hdfs.session.path";
   private static final String TMP_TABLE_SPACE_KEY = "_hive.tmp_table_space";
+  static final String LOCK_FILE_NAME = "inuse.lck";
 
   private final Map<String, Map<String, Table>> tempTables = new HashMap<String, Map<String, Table>>();
   private final Map<String, Map<String, ColumnStatisticsObj>> tempTableColStats =
@@ -228,6 +232,8 @@ public class SessionState {
    */
   private Path hdfsSessionPath;
 
+  private FSDataOutputStream hdfsSessionPathLockFile = null;
+
   /**
    * sub dir of hdfs session path. used to keep tmp tables
    * @return Path for temporary tables created by the current session
@@ -610,8 +616,9 @@ public class SessionState {
    * 2. Local scratch dir
    * 3. Local downloaded resource dir
    * 4. HDFS session path
-   * 5. Local session path
-   * 6. HDFS temp table space
+   * 5. Hold a lock file in the HDFS session dir to indicate that it is in use
+   * 6. Local session path
+   * 7. HDFS temp table space
    * @param userName
    * @throws IOException
    */
@@ -639,11 +646,19 @@ public class SessionState {
     hdfsSessionPath = new Path(hdfsScratchDirURIString, sessionId);
     createPath(conf, hdfsSessionPath, scratchDirPermission, false, true);
     conf.set(HDFS_SESSION_PATH_KEY, hdfsSessionPath.toUri().toString());
-    // 5. Local session path
+    // 5. Hold a lock file in the HDFS session dir to indicate that it is in use
+    if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK)) {
+      FileSystem fs = FileSystem.get(conf);
+      hdfsSessionPathLockFile = fs.create(new Path(hdfsSessionPath, LOCK_FILE_NAME), true);
+      hdfsSessionPathLockFile.writeUTF("hostname: " + InetAddress.getLocalHost().getHostName() + "\n");
+      hdfsSessionPathLockFile.writeUTF("process: " + ManagementFactory.getRuntimeMXBean().getName() + "\n");
+      hdfsSessionPathLockFile.hsync();
+    }
+    // 6. Local session path
     localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId);
     createPath(conf, localSessionPath, scratchDirPermission, true, true);
     conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString());
-    // 6. HDFS temp table space
+    // 7. HDFS temp table space
     hdfsTmpTableSpace = new Path(hdfsSessionPath, TMP_PREFIX);
     createPath(conf, hdfsTmpTableSpace, scratchDirPermission, false, true);
     conf.set(TMP_TABLE_SPACE_KEY, hdfsTmpTableSpace.toUri().toString());
@@ -758,8 +773,18 @@ public class SessionState {
     return this.hdfsTmpTableSpace;
   }
 
+  @VisibleForTesting
+  void releaseSessionLockFile() throws IOException {
+    if (hdfsSessionPath != null && hdfsSessionPathLockFile != null) {
+      hdfsSessionPathLockFile.close();
+    }
+  }
+
   private void dropSessionPaths(Configuration conf) throws IOException {
     if (hdfsSessionPath != null) {
+      if (hdfsSessionPathLockFile != null) {
+        hdfsSessionPathLockFile.close();
+      }
       hdfsSessionPath.getFileSystem(conf).delete(hdfsSessionPath, true);
       LOG.info("Deleted HDFS directory: " + hdfsSessionPath);
     }
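
Putting the pieces together: sessions started with hive.scratchdir.lock=true hold inuse.lck
open until dropSessionPaths() closes it, and the new tool removes whatever is left behind.
A minimal sketch of a dry run followed by an actual cleanup, calling the tool's main() the
way the test does (normally it is invoked as "hive --service cleardanglingscratchdir"; the
class name and the scratch-dir URI below are placeholders):

    import org.apache.hadoop.hive.ql.session.ClearDanglingScratchDir;

    public class ClearScratchExample {
      public static void main(String[] args) throws Exception {
        String scratch = "hdfs://namenode:8020/tmp/hive";   // placeholder location
        // -r/--dry-run only prints the dangling session dirs; -v prints progress to the console
        ClearDanglingScratchDir.main(new String[]{"-r", "-v", "-s", scratch});
        // Without -r the same invocation actually deletes them
        ClearDanglingScratchDir.main(new String[]{"-v", "-s", scratch});
      }
    }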


[11/58] [abbrv] hive git commit: HIVE-13339 : Vectorization: GenericUDFBetween in Projection mode (Gopal V via Sergey Shelukhin)

Posted by jd...@apache.org.
HIVE-13339 : Vectorization: GenericUDFBetween in Projection mode (Gopal V via Sergey Shelukhin)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/eb56666d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/eb56666d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/eb56666d

Branch: refs/heads/llap
Commit: eb56666d8c3e941d8846f8c189c4f4b255acee1c
Parents: 677e5d2
Author: Gopal V <go...@apache.org>
Authored: Wed Mar 23 01:33:00 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Apr 9 15:07:51 2016 -0700

----------------------------------------------------------------------
 .../ql/exec/vector/VectorizationContext.java    |   9 +-
 .../queries/clientpositive/vector_between_in.q  |  30 ++
 .../spark/vector_between_in.q.out               | 332 ++++++++++++++++++
 .../clientpositive/tez/vector_between_in.q.out  | 336 +++++++++++++++++++
 .../clientpositive/vector_between_in.q.out      | 304 +++++++++++++++++
 5 files changed, 1008 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/eb56666d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 30a0f5a..329c1d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -463,7 +463,7 @@ public class VectorizationContext {
       ve = getColumnVectorExpression((ExprNodeColumnDesc) exprDesc, mode);
     } else if (exprDesc instanceof ExprNodeGenericFuncDesc) {
       ExprNodeGenericFuncDesc expr = (ExprNodeGenericFuncDesc) exprDesc;
-      if (isCustomUDF(expr) || isNonVectorizedPathUDF(expr)) {
+      if (isCustomUDF(expr) || isNonVectorizedPathUDF(expr, mode)) {
         ve = getCustomUDFExpression(expr);
       } else {
 
@@ -752,7 +752,7 @@ public class VectorizationContext {
    * Depending on performance requirements and frequency of use, these
    * may be implemented in the future with an optimized VectorExpression.
    */
-  public static boolean isNonVectorizedPathUDF(ExprNodeGenericFuncDesc expr) {
+  public static boolean isNonVectorizedPathUDF(ExprNodeGenericFuncDesc expr, Mode mode) {
     GenericUDF gudf = expr.getGenericUDF();
     if (gudf instanceof GenericUDFBridge) {
       GenericUDFBridge bridge = (GenericUDFBridge) gudf;
@@ -794,6 +794,9 @@ public class VectorizationContext {
                 || arg0Type(expr).equals("double")
                 || arg0Type(expr).equals("float"))) {
       return true;
+    } else if (gudf instanceof GenericUDFBetween && (mode == Mode.PROJECTION)) {
+      // BETWEEN has 4 args here; in projection mode it falls back to the non-vectorized (custom UDF) path
+      return true;
     }
     return false;
   }
@@ -1196,7 +1199,7 @@ public class VectorizationContext {
     childExpr = castedChildren;
 
     //First handle special cases
-    if (udf instanceof GenericUDFBetween) {
+    if (udf instanceof GenericUDFBetween && mode == Mode.FILTER) {
       return getBetweenFilterExpression(childExpr, mode, returnType);
     } else if (udf instanceof GenericUDFIn) {
       return getInExpression(childExpr, mode, returnType);

http://git-wip-us.apache.org/repos/asf/hive/blob/eb56666d/ql/src/test/queries/clientpositive/vector_between_in.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_between_in.q b/ql/src/test/queries/clientpositive/vector_between_in.q
index d57f980..487bf96 100644
--- a/ql/src/test/queries/clientpositive/vector_between_in.q
+++ b/ql/src/test/queries/clientpositive/vector_between_in.q
@@ -35,3 +35,33 @@ SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS
 SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1;
 
 SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351;
+
+
+
+-- projections
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0; 
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0;
+
+EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0;
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0;
+
+SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0; 
+
+SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0;
+
+SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0;
+
+SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0;
+
+set hive.vectorized.execution.enabled=false;
+
+SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0; 
+
+SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0;
+
+SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0;
+
+SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0;

http://git-wip-us.apache.org/repos/asf/hive/blob/eb56666d/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
index 06490a8..fbb43c4 100644
--- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
@@ -693,3 +693,335 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_date_test
 #### A masked pattern was here ####
 6172
+PREHOOK: query: -- projections
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: -- projections
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 2)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: boolean)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: boolean)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: boolean)
+                        Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 2)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: boolean)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: boolean)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: boolean)
+                        Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 2)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: boolean)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: boolean)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: boolean)
+                        Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP, 2)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: boolean)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: boolean)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: boolean)
+                        Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	6041
+true	17
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	9165
+true	9
+PREHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	5974
+true	84
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	3002
+true	6172
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	6041
+true	17
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	9165
+true	9
+PREHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	5974
+true	84
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	3002
+true	6172

http://git-wip-us.apache.org/repos/asf/hive/blob/eb56666d/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
index 4ae687e..a4cf61a 100644
--- a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
@@ -701,3 +701,339 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_date_test
 #### A masked pattern was here ####
 6172
+PREHOOK: query: -- projections
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: -- projections
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: boolean)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: boolean)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: boolean)
+                        Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: boolean)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: boolean)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: boolean)
+                        Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: boolean)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: boolean)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: boolean)
+                        Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: decimal_date_test
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      aggregations: count(1)
+                      keys: _col0 (type: boolean)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: boolean)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: boolean)
+                        Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col1 (type: bigint)
+            Execution mode: vectorized
+        Reducer 2 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: boolean)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	6041
+true	17
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	9165
+true	9
+PREHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	5974
+true	84
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	3002
+true	6172
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	6041
+true	17
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	9165
+true	9
+PREHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	5974
+true	84
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	3002
+true	6172

http://git-wip-us.apache.org/repos/asf/hive/blob/eb56666d/ql/src/test/results/clientpositive/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_between_in.q.out b/ql/src/test/results/clientpositive/vector_between_in.q.out
index 4c3ed71..9f351b2 100644
--- a/ql/src/test/results/clientpositive/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/vector_between_in.q.out
@@ -637,3 +637,307 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@decimal_date_test
 #### A masked pattern was here ####
 6172
+PREHOOK: query: -- projections
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: -- projections
+
+EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: decimal_date_test
+            Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean)
+              outputColumnNames: _col0
+              Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(1)
+                keys: _col0 (type: boolean)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: boolean)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: boolean)
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: boolean)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: decimal_date_test
+            Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean)
+              outputColumnNames: _col0
+              Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(1)
+                keys: _col0 (type: boolean)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: boolean)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: boolean)
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: boolean)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0  FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: decimal_date_test
+            Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: cdate BETWEEN 1969-12-30 AND 1970-01-02 (type: boolean)
+              outputColumnNames: _col0
+              Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(1)
+                keys: _col0 (type: boolean)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: boolean)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: boolean)
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: boolean)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: decimal_date_test
+            Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean)
+              outputColumnNames: _col0
+              Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(1)
+                keys: _col0 (type: boolean)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: boolean)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: boolean)
+                  Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: boolean)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	6041
+true	17
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	9165
+true	9
+PREHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	5974
+true	84
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	3002
+true	6172
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	6041
+true	17
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	9165
+true	9
+PREHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT  cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	6230
+false	5974
+true	84
+PREHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT c0, count(1) from (SELECT cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 as c0 FROM decimal_date_test) tab GROUP BY c0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@decimal_date_test
+#### A masked pattern was here ####
+NULL	3114
+false	3002
+true	6172


[27/58] [abbrv] hive git commit: HIVE-13472 : Replace primitive wrapper's valueOf method with parse* method to avoid unnecessary boxing/unboxing (Kousuke Saruta via Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13472 : Replace primitive wrapper's valueOf method with parse* method to avoid unnecessary boxing/unboxing (Kousuke Saruta via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/547b37dc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/547b37dc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/547b37dc

Branch: refs/heads/llap
Commit: 547b37dcb24ed745fa2a3389ba2cafdb0476a325
Parents: 14bcbab
Author: Kousuke Saruta <sa...@oss.nttdata.co.jp>
Authored: Sun Apr 10 03:03:00 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue Apr 12 09:26:27 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hive/beeline/BeeLine.java   |  4 ++--
 .../beeline/SeparatedValuesOutputFormat.java    |  2 +-
 .../apache/hadoop/hive/common/FileUtils.java    |  2 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |  4 ++--
 .../org/apache/hadoop/hive/conf/Validator.java  |  2 +-
 .../hadoop/hive/hbase/HBaseSerDeParameters.java |  6 ++---
 .../hive/hbase/HiveHBaseInputFormatUtil.java    |  6 ++---
 .../mapreduce/FileOutputCommitterContainer.java |  4 ++--
 .../mapreduce/FosterStorageHandler.java         |  6 ++---
 .../streaming/StreamingIntegrationTester.java   | 12 +++++-----
 .../hive/jdbc/miniHS2/StartMiniHS2Cluster.java  |  2 +-
 .../org/apache/hive/jdbc/HiveBaseResultSet.java | 10 ++++----
 .../impl/LlapZookeeperRegistryImpl.java         | 10 ++++----
 .../llap/shufflehandler/ShuffleHandler.java     |  2 +-
 .../hive/metastore/MetaStoreSchemaInfo.java     |  4 ++--
 .../hive/metastore/hbase/HBaseImport.java       |  4 ++--
 .../hive/metastore/hbase/HBaseReadWrite.java    | 10 ++++----
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |  4 ++--
 .../ql/io/parquet/convert/ETypeConverter.java   |  3 ++-
 .../io/parquet/convert/HiveStructConverter.java |  2 +-
 .../write/ParquetRecordWriterWrapper.java       |  4 ++--
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  2 +-
 .../SizeBasedBigTableSelectorForAutoSMJ.java    |  2 +-
 .../calcite/cost/HiveAlgorithmsUtil.java        | 12 +++++-----
 .../apache/hadoop/hive/ql/parse/ASTNode.java    |  2 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  4 ++--
 .../apache/hadoop/hive/ql/parse/ParseUtils.java |  8 +++----
 .../hadoop/hive/ql/parse/ReplicationSpec.java   |  3 ++-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 10 ++++----
 .../hadoop/hive/ql/parse/TableSample.java       |  4 ++--
 .../hive/ql/parse/spark/GenSparkUtils.java      |  2 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |  2 +-
 .../hive/ql/stats/fs/FSStatsAggregator.java     |  2 +-
 .../hive/ql/stats/fs/FSStatsPublisher.java      |  3 ++-
 .../hive/ql/txn/compactor/CompactorMR.java      | 12 +++++-----
 .../apache/hadoop/hive/ql/udf/UDFToDouble.java  |  2 +-
 .../apache/hadoop/hive/ql/udf/UDFToFloat.java   |  2 +-
 .../hive/ql/udf/generic/GenericUDFBetween.java  |  2 +-
 .../results/clientnegative/dyn_part_max.q.out   |  2 +-
 .../serde2/MetadataTypedColumnsetSerDe.java     |  2 +-
 .../serde2/dynamic_type/thrift_grammar.java     |  2 +-
 .../hive/serde2/lazy/LazySerDeParameters.java   |  3 ++-
 .../hadoop/hive/serde2/lazy/LazyUtils.java      |  2 +-
 .../serde2/thrift/TCTLSeparatedProtocol.java    | 25 +++++++++-----------
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |  6 ++---
 .../hive/service/cli/HiveSQLException.java      |  6 ++---
 .../service/cli/thrift/ThriftCLIService.java    |  4 ++--
 .../junit/runners/ConcurrentTestRunner.java     |  4 ++--
 48 files changed, 116 insertions(+), 117 deletions(-)
----------------------------------------------------------------------
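
The rationale in the commit message rests on a standard Java distinction: wrapper methods such as Integer.valueOf(String) return a boxed object that is immediately auto-unboxed when assigned to a primitive, whereas Integer.parseInt(String) returns the primitive directly and skips the wrapper allocation (or cache lookup) and unboxing step. The following is a minimal, self-contained sketch of the pattern this commit applies across the files listed above; the class and variable names are illustrative only and do not come from the Hive codebase.

// Illustrative sketch of the valueOf -> parse* pattern; not Hive code.
public class ParsePrimitiveExample {
  public static void main(String[] args) {
    String port = "10000";

    // Before: valueOf returns a boxed Integer, which is then auto-unboxed
    // into the primitive variable -- an extra object (or cache lookup) plus
    // an unboxing step the caller never needed.
    int boxedThenUnboxed = Integer.valueOf(port);

    // After: parseInt returns the primitive int directly, no wrapper object.
    int parsed = Integer.parseInt(port);

    // The same substitution applies to the other wrappers touched in this commit.
    boolean verbose = Boolean.parseBoolean("true"); // instead of Boolean.valueOf("true")
    long timeout = Long.parseLong("5000");          // instead of Long.valueOf("5000")
    double ratio = Double.parseDouble("0.75");      // instead of Double.valueOf("0.75")

    System.out.println(boxedThenUnboxed + parsed);
    System.out.println(verbose + " " + timeout + " " + ratio);
  }
}

Note that valueOf is still the right call when a boxed object is genuinely needed (for example, storing into a collection), which is why the change targets only call sites that assign straight to a primitive.
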


http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/beeline/src/java/org/apache/hive/beeline/BeeLine.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index 98d4e09..5e6e9ba 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -664,8 +664,8 @@ public class BeeLine implements Closeable {
     }
 
     dbName = commandLine.getOptionValue("database");
-    getOpts().setVerbose(Boolean.valueOf(commandLine.getOptionValue("verbose")));
-    getOpts().setSilent(Boolean.valueOf(commandLine.getOptionValue("slient")));
+    getOpts().setVerbose(Boolean.parseBoolean(commandLine.getOptionValue("verbose")));
+    getOpts().setSilent(Boolean.parseBoolean(commandLine.getOptionValue("slient")));
 
     int code = 0;
     if (commandLine.getOptionValues("e") != null) {

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java b/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java
index 61b84ef..66d9fd0 100644
--- a/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java
+++ b/beeline/src/java/org/apache/hive/beeline/SeparatedValuesOutputFormat.java
@@ -108,7 +108,7 @@ class SeparatedValuesOutputFormat implements OutputFormat {
     }
     String parsedOptionStr = quotingDisabledStr.toLowerCase();
     if (parsedOptionStr.equals("false") || parsedOptionStr.equals("true")) {
-      return Boolean.valueOf(parsedOptionStr);
+      return Boolean.parseBoolean(parsedOptionStr);
     } else {
       beeLine.error("System Property disable.quoting.for.sv is now " + parsedOptionStr
           + " which only accepts boolean value");

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 51340d8..f7d41cd 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -295,7 +295,7 @@ public final class FileUtils {
       if (c == '%' && i + 2 < path.length()) {
         int code = -1;
         try {
-          code = Integer.valueOf(path.substring(i + 1, i + 3), 16);
+          code = Integer.parseInt(path.substring(i + 1, i + 3), 16);
         } catch (Exception e) {
           code = -1;
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 1702eb1..c7e5b33 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3185,12 +3185,12 @@ public class HiveConf extends Configuration {
 
   public static long toTime(String value, TimeUnit inputUnit, TimeUnit outUnit) {
     String[] parsed = parseNumberFollowedByUnit(value.trim());
-    return outUnit.convert(Long.valueOf(parsed[0].trim().trim()), unitFor(parsed[1].trim(), inputUnit));
+    return outUnit.convert(Long.parseLong(parsed[0].trim()), unitFor(parsed[1].trim(), inputUnit));
   }
 
   public static long toSizeBytes(String value) {
     String[] parsed = parseNumberFollowedByUnit(value.trim());
-    return Long.valueOf(parsed[0].trim()) * multiplierFor(parsed[1].trim());
+    return Long.parseLong(parsed[0].trim()) * multiplierFor(parsed[1].trim());
   }
 
   private static String[] parseNumberFollowedByUnit(String value) {

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/common/src/java/org/apache/hadoop/hive/conf/Validator.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/Validator.java b/common/src/java/org/apache/hadoop/hive/conf/Validator.java
index 3fb09b9..bb8962a 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/Validator.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/Validator.java
@@ -199,7 +199,7 @@ public interface Validator {
     @Override
     public String validate(String value) {
       try {
-        float fvalue = Float.valueOf(value);
+        float fvalue = Float.parseFloat(value);
         if (fvalue < 0 || fvalue > 1) {
           return "Invalid ratio " + value + ", which should be in between 0 to 1";
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java
----------------------------------------------------------------------
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java
index a11d3cd..617c293 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java
@@ -69,8 +69,8 @@ public class HBaseSerDeParameters {
     // Read configuration parameters
     columnMappingString = tbl.getProperty(HBaseSerDe.HBASE_COLUMNS_MAPPING);
     doColumnRegexMatching =
-        Boolean.valueOf(tbl.getProperty(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, "true"));
-    doColumnPrefixCut = Boolean.valueOf(tbl.getProperty(HBaseSerDe.HBASE_COLUMNS_PREFIX_HIDE, "false"));
+        Boolean.parseBoolean(tbl.getProperty(HBaseSerDe.HBASE_COLUMNS_REGEX_MATCHING, "true"));
+    doColumnPrefixCut = Boolean.parseBoolean(tbl.getProperty(HBaseSerDe.HBASE_COLUMNS_PREFIX_HIDE, "false"));
     // Parse and initialize the HBase columns mapping
     columnMappings = HBaseSerDe.parseColumnsMapping(columnMappingString, doColumnRegexMatching, doColumnPrefixCut);
 
@@ -95,7 +95,7 @@ public class HBaseSerDeParameters {
     }
 
     this.serdeParams = new LazySerDeParameters(job, tbl, serdeName);
-    this.putTimestamp = Long.valueOf(tbl.getProperty(HBaseSerDe.HBASE_PUT_TIMESTAMP, "-1"));
+    this.putTimestamp = Long.parseLong(tbl.getProperty(HBaseSerDe.HBASE_PUT_TIMESTAMP, "-1"));
 
     columnMappings.setHiveColumnDescription(serdeName, serdeParams.getColumnNames(),
         serdeParams.getColumnTypes());

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java
----------------------------------------------------------------------
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java
index c002070..6054d53 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseInputFormatUtil.java
@@ -124,15 +124,15 @@ class HiveHBaseInputFormatUtil {
 
     String scanCache = jobConf.get(HBaseSerDe.HBASE_SCAN_CACHE);
     if (scanCache != null) {
-      scan.setCaching(Integer.valueOf(scanCache));
+      scan.setCaching(Integer.parseInt(scanCache));
     }
     String scanCacheBlocks = jobConf.get(HBaseSerDe.HBASE_SCAN_CACHEBLOCKS);
     if (scanCacheBlocks != null) {
-      scan.setCacheBlocks(Boolean.valueOf(scanCacheBlocks));
+      scan.setCacheBlocks(Boolean.parseBoolean(scanCacheBlocks));
     }
     String scanBatch = jobConf.get(HBaseSerDe.HBASE_SCAN_BATCH);
     if (scanBatch != null) {
-      scan.setBatch(Integer.valueOf(scanBatch));
+      scan.setBatch(Integer.parseInt(scanBatch));
     }
     return scan;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 367f4ea..9db3dc1 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -107,7 +107,7 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
     this.partitionsDiscovered = !dynamicPartitioningUsed;
     cachedStorageHandler = HCatUtil.getStorageHandler(context.getConfiguration(), jobInfo.getTableInfo().getStorerInfo());
     Table table = new Table(jobInfo.getTableInfo().getTable());
-    if (dynamicPartitioningUsed && Boolean.valueOf((String)table.getProperty("EXTERNAL"))
+    if (dynamicPartitioningUsed && Boolean.parseBoolean((String)table.getProperty("EXTERNAL"))
         && jobInfo.getCustomDynamicPath() != null
         && jobInfo.getCustomDynamicPath().length() > 0) {
       customDynamicLocationUsed = true;
@@ -355,7 +355,7 @@ class FileOutputCommitterContainer extends OutputCommitterContainer {
     if (customDynamicLocationUsed) {
       partPath = new Path(dynPartPath);
     } else if (!dynamicPartitioningUsed
-         && Boolean.valueOf((String)table.getProperty("EXTERNAL"))
+         && Boolean.parseBoolean((String)table.getProperty("EXTERNAL"))
          && jobInfo.getLocation() != null && jobInfo.getLocation().length() > 0) {
       // Now, we need to de-scratchify this location - i.e., get rid of any
       // _SCRATCH[\d].?[\d]+ from the location.

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
index ef7aa48..14f7316 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FosterStorageHandler.java
@@ -157,7 +157,7 @@ public class FosterStorageHandler extends DefaultStorageHandler {
       // we create a temp dir for the associated write job
       if (dynHash != null) {
         // if external table and custom root specified, update the parent path
-        if (Boolean.valueOf((String)tableDesc.getProperties().get("EXTERNAL"))
+        if (Boolean.parseBoolean((String)tableDesc.getProperties().get("EXTERNAL"))
             && jobInfo.getCustomDynamicRoot() != null
             && jobInfo.getCustomDynamicRoot().length() > 0) {
           parentPath = new Path(parentPath, jobInfo.getCustomDynamicRoot()).toString();
@@ -170,14 +170,14 @@ public class FosterStorageHandler extends DefaultStorageHandler {
       String outputLocation;
 
       if ((dynHash != null)
-          && Boolean.valueOf((String)tableDesc.getProperties().get("EXTERNAL"))
+          && Boolean.parseBoolean((String)tableDesc.getProperties().get("EXTERNAL"))
           && jobInfo.getCustomDynamicPath() != null
           && jobInfo.getCustomDynamicPath().length() > 0) {
         // dynamic partitioning with custom path; resolve the custom path
         // using partition column values
         outputLocation = HCatFileUtil.resolveCustomPath(jobInfo, null, true);
       } else if ((dynHash == null)
-           && Boolean.valueOf((String)tableDesc.getProperties().get("EXTERNAL"))
+           && Boolean.parseBoolean((String)tableDesc.getProperties().get("EXTERNAL"))
            && jobInfo.getLocation() != null && jobInfo.getLocation().length() > 0) {
         // honor custom location for external table apart from what metadata specifies
         outputLocation = jobInfo.getLocation();

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/StreamingIntegrationTester.java
----------------------------------------------------------------------
diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/StreamingIntegrationTester.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/StreamingIntegrationTester.java
index 0fcc103..bf2cc63 100644
--- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/StreamingIntegrationTester.java
+++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/StreamingIntegrationTester.java
@@ -166,12 +166,12 @@ public class StreamingIntegrationTester {
     String db = cmdline.getOptionValue('d');
     String table = cmdline.getOptionValue('t');
     String uri = cmdline.getOptionValue('m');
-    int txnsPerBatch = Integer.valueOf(cmdline.getOptionValue('n', "100"));
-    int writers = Integer.valueOf(cmdline.getOptionValue('w', "2"));
-    int batches = Integer.valueOf(cmdline.getOptionValue('i', "10"));
-    int recordsPerTxn = Integer.valueOf(cmdline.getOptionValue('r', "100"));
-    int frequency = Integer.valueOf(cmdline.getOptionValue('f', "1"));
-    int ap = Integer.valueOf(cmdline.getOptionValue('a', "5"));
+    int txnsPerBatch = Integer.parseInt(cmdline.getOptionValue('n', "100"));
+    int writers = Integer.parseInt(cmdline.getOptionValue('w', "2"));
+    int batches = Integer.parseInt(cmdline.getOptionValue('i', "10"));
+    int recordsPerTxn = Integer.parseInt(cmdline.getOptionValue('r', "100"));
+    int frequency = Integer.parseInt(cmdline.getOptionValue('f', "1"));
+    int ap = Integer.parseInt(cmdline.getOptionValue('a', "5"));
     float abortPct = ((float)ap) / 100.0f;
     String[] partVals = cmdline.getOptionValues('p');
     String[] cols = cmdline.getOptionValues('c');

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java
index 91cbd18..00527a1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/StartMiniHS2Cluster.java
@@ -30,7 +30,7 @@ public class StartMiniHS2Cluster {
 
     MiniClusterType clusterType = MiniClusterType.valueOf(System.getProperty("miniHS2.clusterType", "MR").toUpperCase());
     String confFilesProperty = System.getProperty("miniHS2.conf", "../../data/conf/hive-site.xml");
-    boolean usePortsFromConf = Boolean.valueOf(System.getProperty("miniHS2.usePortsFromConf", "false"));
+    boolean usePortsFromConf = Boolean.parseBoolean(System.getProperty("miniHS2.usePortsFromConf", "false"));
 
     // Load conf files
     String[] confFiles = confFilesProperty.split(",");

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
index 98d0370..88ba853 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java
@@ -289,7 +289,7 @@ public abstract class HiveBaseResultSet implements ResultSet {
       } else if (obj == null) {
         return 0;
       } else if (String.class.isInstance(obj)) {
-        return Double.valueOf((String)obj);
+        return Double.parseDouble((String)obj);
       }
       throw new Exception("Illegal conversion");
     } catch (Exception e) {
@@ -318,7 +318,7 @@ public abstract class HiveBaseResultSet implements ResultSet {
       } else if (obj == null) {
         return 0;
       } else if (String.class.isInstance(obj)) {
-        return Float.valueOf((String)obj);
+        return Float.parseFloat((String)obj);
       }
       throw new Exception("Illegal conversion");
     } catch (Exception e) {
@@ -343,7 +343,7 @@ public abstract class HiveBaseResultSet implements ResultSet {
       } else if (obj == null) {
         return 0;
       } else if (String.class.isInstance(obj)) {
-        return Integer.valueOf((String)obj);
+        return Integer.parseInt((String)obj);
       }
       throw new Exception("Illegal conversion");
     } catch (Exception e) {
@@ -365,7 +365,7 @@ public abstract class HiveBaseResultSet implements ResultSet {
       } else if (obj == null) {
         return 0;
       } else if (String.class.isInstance(obj)) {
-        return Long.valueOf((String)obj);
+        return Long.parseLong((String)obj);
       }
       throw new Exception("Illegal conversion");
     } catch (Exception e) {
@@ -511,7 +511,7 @@ public abstract class HiveBaseResultSet implements ResultSet {
       } else if (obj == null) {
         return 0;
       } else if (String.class.isInstance(obj)) {
-        return Short.valueOf((String)obj);
+        return Short.parseShort((String)obj);
       }
       throw new Exception("Illegal conversion");
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
----------------------------------------------------------------------
diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
index 275cbc2..6af30d4 100644
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
@@ -328,13 +328,13 @@ public class LlapZookeeperRegistryImpl implements ServiceRegistry {
           RegistryTypeUtils.getAddressField(rpc.addresses.get(0),
               AddressTypes.ADDRESS_HOSTNAME_FIELD);
       this.rpcPort =
-          Integer.valueOf(RegistryTypeUtils.getAddressField(rpc.addresses.get(0),
+          Integer.parseInt(RegistryTypeUtils.getAddressField(rpc.addresses.get(0),
               AddressTypes.ADDRESS_PORT_FIELD));
       this.mngPort =
-          Integer.valueOf(RegistryTypeUtils.getAddressField(mng.addresses.get(0),
+          Integer.parseInt(RegistryTypeUtils.getAddressField(mng.addresses.get(0),
               AddressTypes.ADDRESS_PORT_FIELD));
       this.shufflePort =
-          Integer.valueOf(RegistryTypeUtils.getAddressField(shuffle.addresses.get(0),
+          Integer.parseInt(RegistryTypeUtils.getAddressField(shuffle.addresses.get(0),
               AddressTypes.ADDRESS_PORT_FIELD));
       this.serviceAddress =
           RegistryTypeUtils.getAddressField(services.addresses.get(0), AddressTypes.ADDRESS_URI);
@@ -383,8 +383,8 @@ public class LlapZookeeperRegistryImpl implements ServiceRegistry {
 
     @Override
     public Resource getResource() {
-      int memory = Integer.valueOf(srv.get(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname));
-      int vCores = Integer.valueOf(srv.get(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname));
+      int memory = Integer.parseInt(srv.get(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname));
+      int vCores = Integer.parseInt(srv.get(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname));
       return Resource.newInstance(memory, vCores);
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
index 39a1468..9a3e221 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
@@ -658,7 +658,7 @@ public class ShuffleHandler implements AttemptRegistrationListener {
       final List<String> keepAliveList = q.get("keepAlive");
       boolean keepAliveParam = false;
       if (keepAliveList != null && keepAliveList.size() == 1) {
-        keepAliveParam = Boolean.valueOf(keepAliveList.get(0));
+        keepAliveParam = Boolean.parseBoolean(keepAliveList.get(0));
         if (LOG.isDebugEnabled()) {
           LOG.debug("KeepAliveParam : " + keepAliveList
             + " : " + keepAliveParam);

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
index 7c7f7ce..9c30ee7 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
@@ -193,8 +193,8 @@ public class MetaStoreSchemaInfo {
     }
 
     for (int i = 0; i < dbVerParts.length; i++) {
-      Integer dbVerPart = Integer.valueOf(dbVerParts[i]);
-      Integer hiveVerPart = Integer.valueOf(hiveVerParts[i]);
+      int dbVerPart = Integer.parseInt(dbVerParts[i]);
+      int hiveVerPart = Integer.parseInt(hiveVerParts[i]);
       if (dbVerPart > hiveVerPart) {
         return true;
       } else if (dbVerPart < hiveVerPart) {

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java
index ba5cb22..434bd9e 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseImport.java
@@ -206,7 +206,7 @@ public class HBaseImport {
       doAll = true;
     }
     if (cli.hasOption('b')) {
-      batchSize = Integer.valueOf(cli.getOptionValue('b'));
+      batchSize = Integer.parseInt(cli.getOptionValue('b'));
     }
     if (cli.hasOption('d')) {
       hasCmd = true;
@@ -217,7 +217,7 @@ public class HBaseImport {
       functionsToImport = Arrays.asList(cli.getOptionValues('f'));
     }
     if (cli.hasOption('p')) {
-      parallel = Integer.valueOf(cli.getOptionValue('p'));
+      parallel = Integer.parseInt(cli.getOptionValue('p'));
     }
     if (cli.hasOption('r')) {
       hasCmd = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
index 7ed825f..2860875 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseReadWrite.java
@@ -1783,7 +1783,7 @@ public class HBaseReadWrite implements MetadataStore {
       // Someone deleted it before we got to it, no worries
       return;
     }
-    int refCnt = Integer.valueOf(new String(serializedRefCnt, HBaseUtils.ENCODING));
+    int refCnt = Integer.parseInt(new String(serializedRefCnt, HBaseUtils.ENCODING));
     HTableInterface htab = conn.getHBaseTable(SD_TABLE);
     if (--refCnt < 1) {
       Delete d = new Delete(key);
@@ -1823,7 +1823,7 @@ public class HBaseReadWrite implements MetadataStore {
       sdCache.put(new ByteArrayWrapper(key), storageDescriptor);
     } else {
       // Just increment the reference count
-      int refCnt = Integer.valueOf(new String(serializedRefCnt, HBaseUtils.ENCODING)) + 1;
+      int refCnt = Integer.parseInt(new String(serializedRefCnt, HBaseUtils.ENCODING)) + 1;
       Put p = new Put(key);
       p.add(CATALOG_CF, REF_COUNT_COL, Integer.toString(refCnt).getBytes(HBaseUtils.ENCODING));
       htab.put(p);
@@ -2377,7 +2377,7 @@ public class HBaseReadWrite implements MetadataStore {
       Result result = iter.next();
       byte[] val =  result.getValue(CATALOG_CF, MASTER_KEY_COL);
       if (val != null) {
-        int seqNo = Integer.valueOf(new String(result.getRow(), HBaseUtils.ENCODING));
+        int seqNo = Integer.parseInt(new String(result.getRow(), HBaseUtils.ENCODING));
         lines.add("Master key " + seqNo + ": " + HBaseUtils.deserializeMasterKey(val));
       } else {
         val = result.getValue(CATALOG_CF, DELEGATION_TOKEN_COL);
@@ -2395,14 +2395,14 @@ public class HBaseReadWrite implements MetadataStore {
 
   long peekAtSequence(byte[] sequence) throws IOException {
     byte[] serialized = read(SEQUENCES_TABLE, sequence, CATALOG_CF, CATALOG_COL);
-    return serialized == null ? 0 : Long.valueOf(new String(serialized, HBaseUtils.ENCODING));
+    return serialized == null ? 0 : Long.parseLong(new String(serialized, HBaseUtils.ENCODING));
   }
 
   long getNextSequence(byte[] sequence) throws IOException {
     byte[] serialized = read(SEQUENCES_TABLE, sequence, CATALOG_CF, CATALOG_COL);
     long val = 0;
     if (serialized != null) {
-      val = Long.valueOf(new String(serialized, HBaseUtils.ENCODING));
+      val = Long.parseLong(new String(serialized, HBaseUtils.ENCODING));
     }
     byte[] incrSerialized = new Long(val + 1).toString().getBytes(HBaseUtils.ENCODING);
     store(SEQUENCES_TABLE, sequence, CATALOG_CF, CATALOG_COL, incrSerialized);

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 0899793..ec6381b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -539,7 +539,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
           int numReducers = totalFiles / numFiles;
 
           if (numReducers > 1) {
-            int currReducer = Integer.valueOf(Utilities.getTaskIdFromFilename(Utilities
+            int currReducer = Integer.parseInt(Utilities.getTaskIdFromFilename(Utilities
                 .getTaskId(hconf)));
 
             int reducerIdx = prtner.getPartition(key, null, numReducers);
@@ -623,7 +623,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         // Only set up the updater for insert.  For update and delete we don't know unitl we see
         // the row.
         ObjectInspector inspector = bDynParts ? subSetOI : outputObjInspector;
-        int acidBucketNum = Integer.valueOf(Utilities.getTaskIdFromFilename(taskId));
+        int acidBucketNum = Integer.parseInt(Utilities.getTaskIdFromFilename(taskId));
         fsp.updaters[filesIdx] = HiveFileFormatUtils.getAcidRecordUpdater(jc, conf.getTableInfo(),
             acidBucketNum, conf, fsp.outPaths[filesIdx], inspector, reporter, -1);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java
index ec0dd81..ca89640 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java
@@ -165,7 +165,8 @@ public enum ETypeConverter {
           Map<String, String> metadata = parent.getMetadata();
           //Current Hive parquet timestamp implementation stores it in UTC, but other components do not do that.
           //If this file written by current Hive implementation itself, we need to do the reverse conversion, else skip the conversion.
-          boolean skipConversion = Boolean.valueOf(metadata.get(HiveConf.ConfVars.HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION.varname));
+          boolean skipConversion = Boolean.parseBoolean(
+              metadata.get(HiveConf.ConfVars.HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION.varname));
           Timestamp ts = NanoTimeUtils.getTimestamp(nt, skipConversion);
           return new TimestampWritable(ts);
         }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveStructConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveStructConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveStructConverter.java
index e4907d2..a89aa4d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveStructConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveStructConverter.java
@@ -118,7 +118,7 @@ public class HiveStructConverter extends HiveGroupConverter {
 
   private TypeInfo getStructFieldTypeInfo(String field, int fieldIndex) {
     String fieldLowerCase = field.toLowerCase();
-    if (Boolean.valueOf(getMetadata().get(DataWritableReadSupport.PARQUET_COLUMN_INDEX_ACCESS))
+    if (Boolean.parseBoolean(getMetadata().get(DataWritableReadSupport.PARQUET_COLUMN_INDEX_ACCESS))
         && fieldIndex < hiveFieldNames.size()) {
       return hiveFieldTypeInfos.get(fieldIndex);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
index 2f838fc..c021daf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java
@@ -77,7 +77,7 @@ public class ParquetRecordWriterWrapper implements RecordWriter<NullWritable, Pa
     Configuration conf = ContextUtil.getConfiguration(job);
     if (blockSize != null && !blockSize.isEmpty()) {
       LOG.debug("get override parquet.block.size property via tblproperties");
-      conf.setInt(ParquetOutputFormat.BLOCK_SIZE, Integer.valueOf(blockSize));
+      conf.setInt(ParquetOutputFormat.BLOCK_SIZE, Integer.parseInt(blockSize));
     }
 
     String enableDictionaryPage =
@@ -85,7 +85,7 @@ public class ParquetRecordWriterWrapper implements RecordWriter<NullWritable, Pa
     if (enableDictionaryPage != null && !enableDictionaryPage.isEmpty()) {
       LOG.debug("get override parquet.enable.dictionary property via tblproperties");
       conf.setBoolean(ParquetOutputFormat.ENABLE_DICTIONARY,
-        Boolean.valueOf(enableDictionaryPage));
+        Boolean.parseBoolean(enableDictionaryPage));
     }
 
     String compressionName = tableProperties.getProperty(ParquetOutputFormat.COMPRESSION);

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index c27481f..4c9acce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2697,7 +2697,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     String fullF1 = getQualifiedPathWithoutSchemeAndAuthority(srcf, srcFs);
     String fullF2 = getQualifiedPathWithoutSchemeAndAuthority(destf, destFs);
 
-    boolean isInTest = Boolean.valueOf(HiveConf.getBoolVar(srcFs.getConf(), ConfVars.HIVE_IN_TEST));
+    boolean isInTest = HiveConf.getBoolVar(srcFs.getConf(), ConfVars.HIVE_IN_TEST);
     // In the automation, the data warehouse is the local file system based.
     LOG.debug("The source path is " + fullF1 + " and the destination path is " + fullF2);
     if (isInTest) {

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java
index f8aec84..9670daf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SizeBasedBigTableSelectorForAutoSMJ.java
@@ -57,7 +57,7 @@ public abstract class SizeBasedBigTableSelectorForAutoSMJ {
     // If the size is present in the metastore, use it
     if (size != null) {
       try {
-        return Long.valueOf(size);
+        return Long.parseLong(size);
       } catch (NumberFormatException e) {
         return -1;
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsUtil.java
index 6522714..0c13ee7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveAlgorithmsUtil.java
@@ -48,17 +48,17 @@ public class HiveAlgorithmsUtil {
   private final double hdfsRead;
 
   HiveAlgorithmsUtil(HiveConf conf) {
-    cpuCost = Double.valueOf(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_CPU));
+    cpuCost = Double.parseDouble(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_CPU));
     netCost = cpuCost
-        * Double.valueOf(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_NET));
+        * Double.parseDouble(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_NET));
     localFSWrite = netCost
-        * Double.valueOf(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_LFS_WRITE));
+        * Double.parseDouble(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_LFS_WRITE));
     localFSRead = netCost
-        * Double.valueOf(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_LFS_READ));
+        * Double.parseDouble(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_LFS_READ));
     hdfsWrite = localFSWrite
-        * Double.valueOf(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_HDFS_WRITE));
+        * Double.parseDouble(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_HDFS_WRITE));
     hdfsRead = localFSRead
-        * Double.valueOf(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_HDFS_READ));
+        * Double.parseDouble(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_CBO_COST_MODEL_HDFS_READ));
   }
 
   public static RelOptCost computeCardinalityBasedCost(HiveRelNode hr) {
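
[Editor's note] The constructor above chains each cost off the previous one, so the configured properties act as relative multipliers rather than absolute costs. A minimal sketch with made-up factor values (the real defaults live in HiveConf and are not shown in this hunk):

  // Illustration only: how the CBO cost constants compose multiplicatively.
  public class CostChainDemo {
    public static void main(String[] args) {
      double cpuCost = Double.parseDouble("1.0");          // stand-in for ..._CPU
      double netCost = cpuCost * Double.parseDouble("150.0");
      double localFSWrite = netCost * Double.parseDouble("4.0");
      double localFSRead = netCost * Double.parseDouble("4.0");
      double hdfsWrite = localFSWrite * Double.parseDouble("10.0");
      double hdfsRead = localFSRead * Double.parseDouble("1.5");
      System.out.printf("cpu=%.1f net=%.1f lfsW=%.1f lfsR=%.1f hdfsW=%.1f hdfsR=%.1f%n",
          cpuCost, netCost, localFSWrite, localFSRead, hdfsWrite, hdfsRead);
    }
  }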

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
index 16b055b..62f9d14 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
@@ -92,7 +92,7 @@ public class ASTNode extends CommonTree implements Node,Serializable {
    */
   @Override
   public String getName() {
-    return (Integer.valueOf(super.getToken().getType())).toString();
+    return String.valueOf(super.getToken().getType());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index fe9b8cc..46d2342 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -1702,10 +1702,10 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       List<Order> sortCols = new ArrayList<Order>();
       int numBuckets = -1;
       if (buckets.getChildCount() == 2) {
-        numBuckets = (Integer.valueOf(buckets.getChild(1).getText())).intValue();
+        numBuckets = Integer.parseInt(buckets.getChild(1).getText());
       } else {
         sortCols = getColumnNamesOrder((ASTNode) buckets.getChild(1));
-        numBuckets = (Integer.valueOf(buckets.getChild(2).getText())).intValue();
+        numBuckets = Integer.parseInt(buckets.getChild(2).getText());
       }
       if (numBuckets <= 0) {
         throw new SemanticException(ErrorMsg.INVALID_BUCKET_NUMBER.getMsg());

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
index 5f13277..a9e503d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java
@@ -138,7 +138,7 @@ public final class ParseUtils {
     }
 
     String lengthStr = node.getChild(0).getText();
-    return TypeInfoFactory.getVarcharTypeInfo(Integer.valueOf(lengthStr));
+    return TypeInfoFactory.getVarcharTypeInfo(Integer.parseInt(lengthStr));
   }
 
   public static CharTypeInfo getCharTypeInfo(ASTNode node)
@@ -148,7 +148,7 @@ public final class ParseUtils {
     }
 
     String lengthStr = node.getChild(0).getText();
-    return TypeInfoFactory.getCharTypeInfo(Integer.valueOf(lengthStr));
+    return TypeInfoFactory.getCharTypeInfo(Integer.parseInt(lengthStr));
   }
 
   static int getIndex(String[] list, String elem) {
@@ -212,12 +212,12 @@ public final class ParseUtils {
 
       if (node.getChildCount() >= 1) {
         String precStr = node.getChild(0).getText();
-        precision = Integer.valueOf(precStr);
+        precision = Integer.parseInt(precStr);
       }
 
       if (node.getChildCount() == 2) {
         String scaleStr = node.getChild(1).getText();
-        scale = Integer.valueOf(scaleStr);
+        scale = Integer.parseInt(scaleStr);
       }
 
       return TypeInfoFactory.getDecimalTypeInfo(precision, scale);

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index 5f80528..4668271 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -126,7 +126,8 @@ public class ReplicationSpec {
     }
     this.eventId = keyFetcher.apply(ReplicationSpec.KEY.EVENT_ID.toString());
     this.currStateId = keyFetcher.apply(ReplicationSpec.KEY.CURR_STATE_ID.toString());
-    this.isNoop = Boolean.valueOf(keyFetcher.apply(ReplicationSpec.KEY.NOOP.toString())).booleanValue();
+    this.isNoop = Boolean.parseBoolean(
+        keyFetcher.apply(ReplicationSpec.KEY.NOOP.toString()));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 987f25d..9b565c5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -737,11 +737,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         int seedNum = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM);
         sample = new SplitSample(percent, seedNum);
       } else if (type.getType() == HiveParser.TOK_ROWCOUNT) {
-        sample = new SplitSample(Integer.valueOf(value));
+        sample = new SplitSample(Integer.parseInt(value));
       } else {
         assert type.getType() == HiveParser.TOK_LENGTH;
         assertCombineInputFormat(numerator, "Total Length");
-        long length = Integer.valueOf(value.substring(0, value.length() - 1));
+        long length = Integer.parseInt(value.substring(0, value.length() - 1));
         char last = value.charAt(value.length() - 1);
         if (last == 'k' || last == 'K') {
           length <<= 10;
@@ -11384,12 +11384,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       case HiveParser.TOK_ALTERTABLE_BUCKETS:
         bucketCols = getColumnNames((ASTNode) child.getChild(0));
         if (child.getChildCount() == 2) {
-          numBuckets = (Integer.valueOf(child.getChild(1).getText()))
-              .intValue();
+          numBuckets = Integer.parseInt(child.getChild(1).getText());
         } else {
           sortCols = getColumnNamesOrder((ASTNode) child.getChild(1));
-          numBuckets = (Integer.valueOf(child.getChild(2).getText()))
-              .intValue();
+          numBuckets = Integer.parseInt(child.getChild(2).getText());
         }
         break;
       case HiveParser.TOK_TABLEROWFORMAT:
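
[Editor's note] The TOK_LENGTH branch in the first hunk above parses a size literal with a trailing unit character. A self-contained sketch of just that logic follows; only the 'k' case is visible in this hunk, so only that case is shown, and the sample value is illustrative.

  public class LengthSuffixDemo {
    public static void main(String[] args) {
      String value = "64k";                                       // illustrative TABLESAMPLE length
      long length = Integer.parseInt(value.substring(0, value.length() - 1));
      char last = value.charAt(value.length() - 1);
      if (last == 'k' || last == 'K') {
        length <<= 10;                                            // kilobytes -> bytes
      }
      System.out.println(length);                                 // 65536
    }
  }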

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
index c74b4c0..ac71565 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TableSample.java
@@ -68,8 +68,8 @@ public class TableSample {
    *          The list of expressions in the ON part of the TABLESAMPLE clause
    */
   public TableSample(String num, String den, ArrayList<ASTNode> exprs) {
-    numerator = Integer.valueOf(num).intValue();
-    denominator = Integer.valueOf(den).intValue();
+    numerator = Integer.parseInt(num);
+    denominator = Integer.parseInt(den);
     this.exprs = exprs;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
index aa33103..8a85574 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
@@ -440,7 +440,7 @@ public class GenSparkUtils {
     if (fso != null) {
       String bucketCount = fso.getConf().getTableInfo().getProperties().getProperty(
           hive_metastoreConstants.BUCKET_COUNT);
-      if (bucketCount != null && Integer.valueOf(bucketCount) > 1) {
+      if (bucketCount != null && Integer.parseInt(bucketCount) > 1) {
         edgeProperty.setMRShuffle();
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
index ae8c77f..2992568 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
@@ -567,7 +567,7 @@ public final class PlanUtils {
     List<FieldSchema> schemas = new ArrayList<FieldSchema>(cols.size());
     for (int i = 0; i < cols.size(); i++) {
       String name = cols.get(i).getInternalName();
-      if (name.equals(Integer.valueOf(i).toString())) {
+      if (name.equals(String.valueOf(i))) {
         name = fieldPrefix + name;
       }
       schemas.add(MetaStoreUtils.getFieldSchemaFromTypeInfo(name, cols.get(i)

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
index e2aaa70..07df15a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsAggregator.java
@@ -94,7 +94,7 @@ public class FSStatsAggregator implements StatsAggregator {
       if (null == statVal) { // partition was found, but was empty.
         continue;
       }
-      counter += Long.valueOf(statVal);
+      counter += Long.parseLong(statVal);
     }
     LOG.info("Read stats for : " + partID + "\t" + statType + "\t" + counter);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
index e5d89e8..3a49b30 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/fs/FSStatsPublisher.java
@@ -83,7 +83,8 @@ public class FSStatsPublisher implements StatsPublisher {
     if (null != statMap) {
       // In case of LB, we might get called repeatedly.
       for (Entry<String, String> e : statMap.entrySet()) {
-        cpy.put(e.getKey(), String.valueOf(Long.valueOf(e.getValue()) + Long.valueOf(cpy.get(e.getKey()))));
+        cpy.put(e.getKey(),
+            String.valueOf(Long.parseLong(e.getValue()) + Long.parseLong(cpy.get(e.getKey()))));
       }
     }
     statsMap.put(partKV, cpy);

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
index f1f1db2..931be90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
@@ -484,7 +484,7 @@ public class CompactorMR {
         LOG.warn("Found a non-bucket file that we thought matched the bucket pattern! " +
             file.toString() + " Matcher=" + matcher.toString());
       }
-      int bucketNum = Integer.valueOf(matcher.group());
+      int bucketNum = Integer.parseInt(matcher.group());
       BucketTracker bt = splitToBucketMap.get(bucketNum);
       if (bt == null) {
         bt = new BucketTracker();
@@ -628,15 +628,15 @@ public class CompactorMR {
     StringableMap(String s) {
       String[] parts = s.split(":", 2);
       // read that many chars
-      int numElements = Integer.valueOf(parts[0]);
+      int numElements = Integer.parseInt(parts[0]);
       s = parts[1];
       for (int i = 0; i < numElements; i++) {
         parts = s.split(":", 2);
-        int len = Integer.valueOf(parts[0]);
+        int len = Integer.parseInt(parts[0]);
         String key = null;
         if (len > 0) key = parts[1].substring(0, len);
         parts = parts[1].substring(len).split(":", 2);
-        len = Integer.valueOf(parts[0]);
+        len = Integer.parseInt(parts[0]);
         String value = null;
         if (len > 0) value = parts[1].substring(0, len);
         s = parts[1].substring(len);
@@ -683,11 +683,11 @@ public class CompactorMR {
     StringableList(String s) {
       String[] parts = s.split(":", 2);
       // read that many chars
-      int numElements = Integer.valueOf(parts[0]);
+      int numElements = Integer.parseInt(parts[0]);
       s = parts[1];
       for (int i = 0; i < numElements; i++) {
         parts = s.split(":", 2);
-        int len = Integer.valueOf(parts[0]);
+        int len = Integer.parseInt(parts[0]);
         String val = parts[1].substring(0, len);
         s = parts[1].substring(len);
         add(new Path(val));
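
[Editor's note] StringableMap and StringableList both read a count-prefixed, length-prefixed encoding. The following standalone sketch is reconstructed from the StringableMap constructor above (class and method names here are illustrative, not Hive APIs) and parses a two-entry example.

  import java.util.LinkedHashMap;
  import java.util.Map;

  public class LengthPrefixedMapDemo {
    static Map<String, String> parse(String s) {
      Map<String, String> result = new LinkedHashMap<>();
      String[] parts = s.split(":", 2);
      int numElements = Integer.parseInt(parts[0]);      // "<count>:" prefix
      s = parts[1];
      for (int i = 0; i < numElements; i++) {
        parts = s.split(":", 2);
        int len = Integer.parseInt(parts[0]);            // "<keyLen>:" prefix
        String key = len > 0 ? parts[1].substring(0, len) : null;
        parts = parts[1].substring(len).split(":", 2);
        len = Integer.parseInt(parts[0]);                // "<valueLen>:" prefix
        String value = len > 0 ? parts[1].substring(0, len) : null;
        s = parts[1].substring(len);
        result.put(key, value);
      }
      return result;
    }

    public static void main(String[] args) {
      // Two entries: ("db", "default") and ("tbl", "t1")
      System.out.println(parse("2:2:db7:default3:tbl2:t1"));   // {db=default, tbl=t1}
    }
  }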

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
index e932f11..9cbc114 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
@@ -169,7 +169,7 @@ public class UDFToDouble extends UDF {
         return null;
       }
       try {
-        doubleWritable.set(Double.valueOf(i.toString()));
+        doubleWritable.set(Double.parseDouble(i.toString()));
         return doubleWritable;
       } catch (NumberFormatException e) {
         // MySQL returns 0 if the string is not a well-formed double value.

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
index 119eaca..c612307 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
@@ -170,7 +170,7 @@ public class UDFToFloat extends UDF {
         return null;
       }
       try {
-        floatWritable.set(Float.valueOf(i.toString()));
+        floatWritable.set(Float.parseFloat(i.toString()));
         return floatWritable;
       } catch (NumberFormatException e) {
         // MySQL returns 0 if the string is not a well-formed numeric value.

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java
index 04f72a6..eb0f9e2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBetween.java
@@ -75,7 +75,7 @@ public class GenericUDFBetween extends GenericUDF {
   public String getDisplayString(String[] children) {
     StringBuilder sb = new StringBuilder();
     sb.append(children[1]);
-    if (Boolean.valueOf(children[0])) {
+    if (Boolean.parseBoolean(children[0])) {
       sb.append(" NOT");
     }
     sb.append(" BETWEEN ");

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/ql/src/test/results/clientnegative/dyn_part_max.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/dyn_part_max.q.out b/ql/src/test/results/clientnegative/dyn_part_max.q.out
index dd5e8e9..4386720 100644
--- a/ql/src/test/results/clientnegative/dyn_part_max.q.out
+++ b/ql/src/test/results/clientnegative/dyn_part_max.q.out
@@ -26,4 +26,4 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@max_parts
 Failed with exception Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49.
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.MoveTask
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.MoveTask. Number of dynamic partitions created is 49, which is more than 10. To solve this try to set hive.exec.max.dynamic.partitions to at least 49.

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/serde/src/java/org/apache/hadoop/hive/serde2/MetadataTypedColumnsetSerDe.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/MetadataTypedColumnsetSerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/MetadataTypedColumnsetSerDe.java
index a0a790c..551a9da 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/MetadataTypedColumnsetSerDe.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/MetadataTypedColumnsetSerDe.java
@@ -77,7 +77,7 @@ public class MetadataTypedColumnsetSerDe extends AbstractSerDe {
     if (altValue != null && altValue.length() > 0) {
       try {
         byte[] b = new byte[1];
-        b[0] = Byte.valueOf(altValue).byteValue();
+        b[0] = Byte.parseByte(altValue);
         return new String(b);
       } catch (NumberFormatException e) {
         return altValue;

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/thrift_grammar.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/thrift_grammar.java b/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/thrift_grammar.java
index 7fca311..b275af0 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/thrift_grammar.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/dynamic_type/thrift_grammar.java
@@ -2052,7 +2052,7 @@ public class thrift_grammar/* @bgen(jjtree) */implements
       jjtree.closeNodeScope(jjtn000, true);
       jjtc000 = false;
       if (fidnum.length() > 0) {
-        int fidInt = Integer.valueOf(fidnum);
+        int fidInt = Integer.parseInt(fidnum);
         jjtn000.fieldid = fidInt;
       } else {
         jjtn000.fieldid = field_val--;

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java
index 46dfaa7..7232d0b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazySerDeParameters.java
@@ -104,7 +104,8 @@ public class LazySerDeParameters implements LazyObjectInspectorParameters {
         needsEscape[b & 0xFF] = true;         // Converts the negative byte into positive index
       }
 
-      boolean isEscapeCRLF = Boolean.valueOf(tbl.getProperty(serdeConstants.SERIALIZATION_ESCAPE_CRLF));
+      boolean isEscapeCRLF =
+          Boolean.parseBoolean(tbl.getProperty(serdeConstants.SERIALIZATION_ESCAPE_CRLF));
       if (isEscapeCRLF) {
         needsEscape['\r'] = true;
         needsEscape['\n'] = true;

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
index ee39196..6d7369b 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
@@ -490,7 +490,7 @@ public final class LazyUtils {
   public static byte getByte(String altValue, byte defaultVal) {
     if (altValue != null && altValue.length() > 0) {
       try {
-        return Byte.valueOf(altValue).byteValue();
+        return Byte.parseByte(altValue);
       } catch (NumberFormatException e) {
         return (byte) altValue.charAt(0);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java b/serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java
index 6144052..ad1f872 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/thrift/TCTLSeparatedProtocol.java
@@ -423,7 +423,7 @@ public class TCTLSeparatedProtocol extends TProtocol implements
     if (altValue != null && altValue.length() > 0) {
       try {
         byte[] b = new byte[1];
-        b[0] = Byte.valueOf(altValue).byteValue();
+        b[0] = Byte.parseByte(altValue);
         return new String(b);
       } catch (NumberFormatException e) {
         return altValue;
@@ -451,11 +451,10 @@ public class TCTLSeparatedProtocol extends TProtocol implements
         rowSeparator);
     mapSeparator = getByteValue(tbl.getProperty(serdeConstants.MAPKEY_DELIM),
         mapSeparator);
-    returnNulls = Boolean.valueOf(
-        tbl.getProperty(ReturnNullsKey, String.valueOf(returnNulls)))
-        .booleanValue();
-    bufferSize = Integer.valueOf(
-        tbl.getProperty(BufferSizeKey, String.valueOf(bufferSize))).intValue();
+    returnNulls = Boolean.parseBoolean(
+        tbl.getProperty(ReturnNullsKey, String.valueOf(returnNulls)));
+    bufferSize = Integer.parseInt(
+        tbl.getProperty(BufferSizeKey, String.valueOf(bufferSize)));
     nullString = tbl.getProperty(serdeConstants.SERIALIZATION_NULL_FORMAT, "\\N");
     quote = tbl.getProperty(serdeConstants.QUOTE_CHAR, null);
 
@@ -766,8 +765,7 @@ public class TCTLSeparatedProtocol extends TProtocol implements
   public boolean readBool() throws TException {
     String val = readString();
     lastPrimitiveWasNullFlag = val == null;
-    return val == null || val.isEmpty() ? false : Boolean.valueOf(val)
-        .booleanValue();
+    return val == null || val.isEmpty() ? false : Boolean.parseBoolean(val);
   }
 
   @Override
@@ -775,7 +773,7 @@ public class TCTLSeparatedProtocol extends TProtocol implements
     String val = readString();
     lastPrimitiveWasNullFlag = val == null;
     try {
-      return val == null || val.isEmpty() ? 0 : Byte.valueOf(val).byteValue();
+      return val == null || val.isEmpty() ? 0 : Byte.parseByte(val);
     } catch (NumberFormatException e) {
       lastPrimitiveWasNullFlag = true;
       return 0;
@@ -787,7 +785,7 @@ public class TCTLSeparatedProtocol extends TProtocol implements
     String val = readString();
     lastPrimitiveWasNullFlag = val == null;
     try {
-      return val == null || val.isEmpty() ? 0 : Short.valueOf(val).shortValue();
+      return val == null || val.isEmpty() ? 0 : Short.parseShort(val);
     } catch (NumberFormatException e) {
       lastPrimitiveWasNullFlag = true;
       return 0;
@@ -799,7 +797,7 @@ public class TCTLSeparatedProtocol extends TProtocol implements
     String val = readString();
     lastPrimitiveWasNullFlag = val == null;
     try {
-      return val == null || val.isEmpty() ? 0 : Integer.valueOf(val).intValue();
+      return val == null || val.isEmpty() ? 0 : Integer.parseInt(val);
     } catch (NumberFormatException e) {
       lastPrimitiveWasNullFlag = true;
       return 0;
@@ -811,7 +809,7 @@ public class TCTLSeparatedProtocol extends TProtocol implements
     String val = readString();
     lastPrimitiveWasNullFlag = val == null;
     try {
-      return val == null || val.isEmpty() ? 0 : Long.valueOf(val).longValue();
+      return val == null || val.isEmpty() ? 0 : Long.parseLong(val);
     } catch (NumberFormatException e) {
       lastPrimitiveWasNullFlag = true;
       return 0;
@@ -823,8 +821,7 @@ public class TCTLSeparatedProtocol extends TProtocol implements
     String val = readString();
     lastPrimitiveWasNullFlag = val == null;
     try {
-      return val == null || val.isEmpty() ? 0 : Double.valueOf(val)
-          .doubleValue();
+      return val == null || val.isEmpty() ? 0 : Double.parseDouble(val);
     } catch (NumberFormatException e) {
       lastPrimitiveWasNullFlag = true;
       return 0;
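
[Editor's note] Each read* method above follows the same null/empty-tolerant pattern. A minimal standalone version of it for the int case (the helper name is illustrative, not part of TCTLSeparatedProtocol):

  public class SafeParseDemo {
    static int parseIntOrZero(String val) {
      try {
        return (val == null || val.isEmpty()) ? 0 : Integer.parseInt(val);
      } catch (NumberFormatException e) {
        return 0;   // the protocol flags the field as null instead of failing the read
      }
    }

    public static void main(String[] args) {
      System.out.println(parseIntOrZero("42"));   // 42
      System.out.println(parseIntOrZero(""));     // 0
      System.out.println(parseIntOrZero("abc"));  // 0
    }
  }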

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index d3bb4e4..16daecf 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -430,7 +430,7 @@ public final class TypeInfoUtils {
 
           int length = 1;
           if (params.length == 1) {
-            length = Integer.valueOf(params[0]);
+            length = Integer.parseInt(params[0]);
             if (typeEntry.primitiveCategory == PrimitiveCategory.VARCHAR) {
               BaseCharUtils.validateVarcharParameter(length);
               return TypeInfoFactory.getVarcharTypeInfo(length);
@@ -451,8 +451,8 @@ public final class TypeInfoUtils {
             // precision/scale. In this case, the default (10,0) is assumed. Thus, do nothing here.
           } else if (params.length == 2) {
             // New metadata always have two parameters.
-            precision = Integer.valueOf(params[0]);
-            scale = Integer.valueOf(params[1]);
+            precision = Integer.parseInt(params[0]);
+            scale = Integer.parseInt(params[1]);
             HiveDecimalUtils.validateParameter(precision, scale);
           } else if (params.length > 2) {
             throw new IllegalArgumentException("Type decimal only takes two parameter, but " +

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/service/src/java/org/apache/hive/service/cli/HiveSQLException.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/HiveSQLException.java b/service/src/java/org/apache/hive/service/cli/HiveSQLException.java
index 42aaae0..a81fe0f 100644
--- a/service/src/java/org/apache/hive/service/cli/HiveSQLException.java
+++ b/service/src/java/org/apache/hive/service/cli/HiveSQLException.java
@@ -210,8 +210,8 @@ public class HiveSQLException extends SQLException {
     String exceptionMessage = detail.substring(i1 + 1, i2);
     Throwable ex = newInstance(exceptionClass, exceptionMessage);
 
-    Integer length = Integer.valueOf(detail.substring(i2 + 1, i3));
-    Integer unique = Integer.valueOf(detail.substring(i3 + 1));
+    int length = Integer.parseInt(detail.substring(i2 + 1, i3));
+    int unique = Integer.parseInt(detail.substring(i3 + 1));
 
     int i = 0;
     StackTraceElement[] trace = new StackTraceElement[length];
@@ -226,7 +226,7 @@ public class HiveSQLException extends SQLException {
       if (fileName.isEmpty()) {
         fileName = null;
       }
-      int lineNumber = Integer.valueOf(detail.substring(j3 + 1));
+      int lineNumber = Integer.parseInt(detail.substring(j3 + 1));
       trace[i] = new StackTraceElement(className, methodName, fileName, lineNumber);
     }
     int common = trace.length - i;

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
index 0a2a761..be9833d 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
@@ -227,7 +227,7 @@ public abstract class ThriftCLIService extends AbstractService implements TCLISe
               TimeUnit.SECONDS);
       portString = System.getenv("HIVE_SERVER2_THRIFT_HTTP_PORT");
       if (portString != null) {
-        portNum = Integer.valueOf(portString);
+        portNum = Integer.parseInt(portString);
       } else {
         portNum = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT);
       }
@@ -238,7 +238,7 @@ public abstract class ThriftCLIService extends AbstractService implements TCLISe
           hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME, TimeUnit.SECONDS);
       portString = System.getenv("HIVE_SERVER2_THRIFT_PORT");
       if (portString != null) {
-        portNum = Integer.valueOf(portString);
+        portNum = Integer.parseInt(portString);
       } else {
         portNum = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/547b37dc/testutils/src/java/org/apache/hive/testutils/junit/runners/ConcurrentTestRunner.java
----------------------------------------------------------------------
diff --git a/testutils/src/java/org/apache/hive/testutils/junit/runners/ConcurrentTestRunner.java b/testutils/src/java/org/apache/hive/testutils/junit/runners/ConcurrentTestRunner.java
index 3f6cd6a..ed47481 100644
--- a/testutils/src/java/org/apache/hive/testutils/junit/runners/ConcurrentTestRunner.java
+++ b/testutils/src/java/org/apache/hive/testutils/junit/runners/ConcurrentTestRunner.java
@@ -40,7 +40,7 @@ public class ConcurrentTestRunner extends BlockJUnit4ClassRunner {
 
     String numThreadsProp = System.getProperty("test.concurrency.num.threads");
     if (numThreadsProp != null) {
-      numThreads = Integer.valueOf(numThreadsProp);
+      numThreads = Integer.parseInt(numThreadsProp);
     }
 
     setScheduler(new ConcurrentScheduler(newFixedThreadPool(numThreads, new ConcurrentTestRunnerThreadFactory())));
@@ -59,4 +59,4 @@ public class ConcurrentTestRunner extends BlockJUnit4ClassRunner {
       return new Thread(runnable, threadName);
     }
   }
-}
\ No newline at end of file
+}


[25/58] [abbrv] hive git commit: HIVE-11615 : Create test for max thrift message setting (Jason Dere via Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-11615 : Create test for max thrift message setting (Jason Dere via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dc010a36
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dc010a36
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dc010a36

Branch: refs/heads/llap
Commit: dc010a362a69f0ae29b20397cbfda76468beceec
Parents: 4eef55b
Author: Jason Dere <jd...@hortonworks.com>
Authored: Thu Aug 20 18:16:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Apr 11 23:28:53 2016 -0700

----------------------------------------------------------------------
 .../thrift/ThriftCliServiceMessageSizeTest.java | 140 +++++++++++++++++++
 1 file changed, 140 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/dc010a36/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/ThriftCliServiceMessageSizeTest.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/ThriftCliServiceMessageSizeTest.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/ThriftCliServiceMessageSizeTest.java
new file mode 100644
index 0000000..fedc992
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/ThriftCliServiceMessageSizeTest.java
@@ -0,0 +1,140 @@
+package org.apache.hive.service.cli.thrift;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hive.service.Service;
+import org.apache.hive.service.auth.HiveAuthFactory.AuthTypes;
+import org.apache.hive.service.cli.SessionHandle;
+import org.apache.hive.service.server.HiveServer2;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class ThriftCliServiceMessageSizeTest {
+  protected static int port;
+  protected static String host = "localhost";
+  protected static HiveServer2 hiveServer2;
+  protected static ThriftCLIServiceClient client;
+  protected static HiveConf hiveConf;
+  protected static String USERNAME = "anonymous";
+  protected static String PASSWORD = "anonymous";
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    // Find a free port
+    port = MetaStoreUtils.findFreePort();
+    hiveServer2 = new HiveServer2();
+    hiveConf = new HiveConf();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+  }
+
+  protected static void startHiveServer2WithConf(HiveServer2 hiveServer2, HiveConf hiveConf)
+      throws Exception {
+    hiveServer2.init(hiveConf);
+    // Start HiveServer2 with given config
+    // Fail if server doesn't start
+    try {
+      hiveServer2.start();
+    } catch (Throwable t) {
+      t.printStackTrace();
+      fail();
+    }
+    // Wait for startup to complete
+    Thread.sleep(2000);
+    System.out.println("HiveServer2 started on port " + port);
+  }
+
+  protected static void stopHiveServer2(HiveServer2 hiveServer2) throws Exception {
+    if (hiveServer2 != null) {
+      hiveServer2.stop();
+    }
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+
+  }
+
+  @Test
+  public void testMessageSize() throws Exception {
+    String transportMode = "binary";
+
+    hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
+    hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, host);
+    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, port);
+    hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, AuthTypes.NONE.toString());
+    hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, transportMode);
+
+    HiveServer2 hiveServer2 = new HiveServer2();
+    String url = "jdbc:hive2://localhost:" + port + "/default";
+    Class.forName("org.apache.hive.jdbc.HiveDriver");
+
+    try {
+      // First start HS2 with high message size limit. This should allow connections
+      hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE, 100*1024*1024);
+      startHiveServer2WithConf(hiveServer2, hiveConf);
+
+      System.out.println("Started Thrift CLI service with message size limit "
+          + hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE));
+
+      // With the high message size limit this connection should work
+      Connection connection = DriverManager.getConnection(url, "hiveuser", "hive");
+      Statement stmt = connection.createStatement();
+      assertNotNull("Statement is null", stmt);
+      stmt.execute("set hive.support.concurrency = false");
+      connection.close();
+      stopHiveServer2(hiveServer2);
+
+      // Now start HS2 with low message size limit. This should prevent any connections
+      hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE, 1);
+      hiveServer2 = new HiveServer2();
+      startHiveServer2WithConf(hiveServer2, hiveConf);
+      System.out.println("Started Thrift CLI service with message size limit "
+          + hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE));
+
+      Exception caughtException = null;
+      try {
+        // This should fail
+        connection = DriverManager.getConnection(url, "hiveuser", "hive");
+      } catch (Exception err) {
+        caughtException = err;
+      }
+      // Verify we hit an error while connecting
+      assertNotNull(caughtException);
+    } finally {
+      stopHiveServer2(hiveServer2);
+      hiveServer2 = null;
+    }
+  }
+}


[56/58] [abbrv] hive git commit: Revert "HIVE-13149: Remove some unnecessary HMS connections from HS2 (Reviewed by Szehon Ho, Chaoyu Tang)"

Posted by jd...@apache.org.
Revert "HIVE-13149: Remove some unnecessary HMS connections from HS2 (Reviewed by Szehon Ho, Chaoyu Tang)"

This reverts commit 37e6e1bf56d7d2fd557730380b147c745fc051ce.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/58c4e121
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/58c4e121
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/58c4e121

Branch: refs/heads/llap
Commit: 58c4e1215ec059b37fd4899a71c1789619ef5dba
Parents: 6a776f5
Author: Aihua Xu <ai...@apache.org>
Authored: Fri Apr 15 09:43:19 2016 -0400
Committer: Aihua Xu <ai...@apache.org>
Committed: Fri Apr 15 09:43:19 2016 -0400

----------------------------------------------------------------------
 .../hadoop/hive/metastore/TestMetastoreVersion.java   |  7 +++----
 .../metastore/hbase/TestHBaseMetastoreMetrics.java    |  4 +++-
 .../org/apache/hadoop/hive/hbase/HBaseQTestUtil.java  | 10 +---------
 .../org/apache/hadoop/hive/hbase/HBaseTestSetup.java  |  3 +++
 .../java/org/apache/hadoop/hive/ql/QTestUtil.java     | 14 ++++----------
 .../hadoop/hive/metastore/HiveMetaStoreClient.java    | 10 ++++------
 .../apache/hadoop/hive/ql/session/SessionState.java   |  8 ++++++++
 7 files changed, 26 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/58c4e121/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
index 5ceb3d2..53f0d0e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore;
 
 import java.io.File;
 import java.lang.reflect.Field;
+import java.util.Random;
 
 import junit.framework.TestCase;
 
@@ -31,7 +32,6 @@ import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 
@@ -96,9 +96,8 @@ public class TestMetastoreVersion extends TestCase {
     // session creation should fail since the schema didn't get created
     try {
       SessionState.start(new CliSessionState(hiveConf));
-      Hive.get(hiveConf).getMSC();
-      fail("An exception is expected since schema is not created.");
-    } catch (Exception re) {
+      fail("Expected exception");
+    } catch (RuntimeException re) {
       LOG.info("Exception in testVersionRestriction: " + re, re);
       String msg = HiveStringUtils.stringifyException(re);
       assertTrue("Expected 'Version information not found in metastore' in: " + msg, msg

http://git-wip-us.apache.org/repos/asf/hive/blob/58c4e121/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java
index aefafe0..3ed88f2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseMetastoreMetrics.java
@@ -41,6 +41,8 @@ import java.io.IOException;
  */
 public class TestHBaseMetastoreMetrics extends HBaseIntegrationTests {
 
+  private CodahaleMetrics metrics;
+
   @BeforeClass
   public static void startup() throws Exception {
     HBaseIntegrationTests.startMiniCluster();
@@ -64,6 +66,7 @@ public class TestHBaseMetastoreMetrics extends HBaseIntegrationTests {
     conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name());
     SessionState.start(new CliSessionState(conf));
     driver = new Driver(conf);
+    metrics = (CodahaleMetrics) MetricsFactory.getInstance();
   }
 
   @Test
@@ -104,7 +107,6 @@ public class TestHBaseMetastoreMetrics extends HBaseIntegrationTests {
     driver.run("use default");
     driver.run("drop database tempdb cascade");
 
-    CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance();
     String json = metrics.dumpJson();
     MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_DATABASES, 2);
     MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.CREATE_TOTAL_TABLES, 7);

http://git-wip-us.apache.org/repos/asf/hive/blob/58c4e121/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
index 70c0b13..3ff5742 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseQTestUtil.java
@@ -39,14 +39,12 @@ public class HBaseQTestUtil extends QTestUtil {
   /** A handle to this harness's cluster */
   private final HConnection conn;
 
-  private HBaseTestSetup setup;
-
   public HBaseQTestUtil(
     String outDir, String logDir, MiniClusterType miniMr, HBaseTestSetup setup,
     String initScript, String cleanupScript)
     throws Exception {
+
     super(outDir, logDir, miniMr, null, "0.20", initScript, cleanupScript, false, false);
-    this.setup = setup;
     setup.preTest(conf);
     this.conn = setup.getConnection();
     super.init();
@@ -71,12 +69,6 @@ public class HBaseQTestUtil extends QTestUtil {
   }
 
   @Override
-  protected void initConfFromSetup() throws Exception {
-    super.initConfFromSetup();
-    setup.preTest(conf);
-  }
-
-  @Override
   public void createSources(String tname) throws Exception {
     super.createSources(tname);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/58c4e121/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
index cee7158..e6383dc 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/hbase/HBaseTestSetup.java
@@ -22,6 +22,9 @@ import java.io.IOException;
 import java.net.ServerSocket;
 import java.util.Arrays;
 
+import junit.extensions.TestSetup;
+import junit.framework.Test;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;

http://git-wip-us.apache.org/repos/asf/hive/blob/58c4e121/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 2f109ab..8473436 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -510,7 +510,6 @@ public class QTestUtil {
       dfs.shutdown();
       dfs = null;
     }
-    Hive.closeCurrent();
   }
 
   public String readEntireFileIntoString(File queryFile) throws IOException {
@@ -732,9 +731,8 @@ public class QTestUtil {
       return;
     }
 
-    conf.set("hive.metastore.filter.hook",
+    db.getConf().set("hive.metastore.filter.hook",
         "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl");
-    db = Hive.get(conf);
     // Delete any tables other than the source tables
     // and any databases other than the default database.
     for (String dbName : db.getAllDatabases()) {
@@ -802,20 +800,16 @@ public class QTestUtil {
       return;
     }
 
+    clearTablesCreatedDuringTests();
+    clearKeysCreatedInTests();
+
     // allocate and initialize a new conf since a test can
     // modify conf by using 'set' commands
     conf = new HiveConf(Driver.class);
     initConf();
-    initConfFromSetup();
-
     // renew the metastore since the cluster type is unencrypted
     db = Hive.get(conf);  // propagate new conf to meta store
 
-    clearTablesCreatedDuringTests();
-    clearKeysCreatedInTests();
-  }
-
-  protected void initConfFromSetup() throws Exception {
     setup.preTest(conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/58c4e121/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 64a26ac..cdd12ab 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -178,7 +178,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
   private boolean isConnected = false;
   private URI metastoreUris[];
   private final HiveMetaHookLoader hookLoader;
-  protected final HiveConf conf;  // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client.
+  protected final HiveConf conf;
   protected boolean fastpath = false;
   private String tokenStrForm;
   private final boolean localMetaStore;
@@ -205,10 +205,8 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
     this.hookLoader = hookLoader;
     if (conf == null) {
       conf = new HiveConf(HiveMetaStoreClient.class);
-      this.conf = conf;
-    } else {
-      this.conf = new HiveConf(conf);
     }
+    this.conf = conf;
     filterHook = loadFilterHooks();
     fileMetadataBatchSize = HiveConf.getIntVar(
         conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX);
@@ -223,10 +221,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
       // instantiate the metastore server handler directly instead of connecting
       // through the network
       if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) {
-        client = new HiveMetaStore.HMSHandler("hive client", this.conf, true);
+        client = new HiveMetaStore.HMSHandler("hive client", conf, true);
         fastpath = true;
       } else {
-        client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+        client = HiveMetaStore.newRetryingHMSHandler("hive client", conf, true);
       }
       isConnected = true;
       snapshotActiveConf();

http://git-wip-us.apache.org/repos/asf/hive/blob/58c4e121/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 2b15c23..8c6c46f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -554,6 +554,10 @@ public class SessionState {
     // Get the following out of the way when you start the session these take a
     // while and should be done when we start up.
     try {
+      // Hive object instance should be created with a copy of the conf object. If the conf is
+      // shared with SessionState, other parts of the code might update the config, but
+      // Hive.get(HiveConf) would not recognize the case when it needs refreshing
+      Hive.get(new HiveConf(startSs.conf)).getMSC();
       UserGroupInformation sessionUGI = Utils.getUGI();
       FileSystem.get(startSs.conf);
 
@@ -579,6 +583,10 @@ public class SessionState {
       }
     } catch (RuntimeException e) {
       throw e;
+    } catch (Hive.SchemaException e) {
+      RuntimeException ex = new RuntimeException(e.getMessage());
+      ex.setStackTrace(new StackTraceElement[0]);
+      throw ex;
     } catch (Exception e) {
       // Catch-all due to some exec time dependencies on session state
       // that would cause ClassNoFoundException otherwise


[44/58] [abbrv] hive git commit: Revert "HIVE-13496. Create initial test data once across multiple test runs - TestCliDriver. (Siddharth Seth, reviewed by Ashutosh Chauhan)"

Posted by jd...@apache.org.
Revert "HIVE-13496. Create initial test data once across multiple test runs - TestCliDriver. (Siddharth Seth, reviewed by Ashutosh Chauhan)"

This reverts commit 976e628fc01911936caa19e61ea3342f3a19455a.


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e3e43c6d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e3e43c6d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e3e43c6d

Branch: refs/heads/llap
Commit: e3e43c6dd89b9643d3f7a8f21c4789af24bba6eb
Parents: 940fc7d
Author: Siddharth Seth <ss...@apache.org>
Authored: Thu Apr 14 11:32:19 2016 -0700
Committer: Siddharth Seth <ss...@apache.org>
Committed: Thu Apr 14 11:32:19 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/QTestUtil.java    | 247 ++-----------------
 pom.xml                                         |   2 -
 ql/src/test/templates/TestCliDriver.vm          |  38 +--
 3 files changed, 22 insertions(+), 265 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e3e43c6d/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 79646cd..2f109ab 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -51,6 +51,7 @@ import java.util.Collection;
 import java.util.Comparator;
 import java.util.Deque;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -62,7 +63,6 @@ import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
@@ -84,7 +84,9 @@ import org.apache.hadoop.hive.common.io.SortPrintStream;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.LlapItUtils;
+import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
 import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
+import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon;
 import org.apache.hadoop.hive.llap.io.api.LlapProxy;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -97,7 +99,6 @@ import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
 import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
@@ -138,8 +139,6 @@ public class QTestUtil {
   // security property names
   private static final String SECURITY_KEY_PROVIDER_URI_NAME = "dfs.encryption.key.provider.uri";
   private static final String CRLF = System.getProperty("line.separator");
-  private static final String TEST_BUILD_DIR = System.getProperty("test.build.dir");
-  private static final String CACHED_DATA_DIR_NAME = "cachedData";
 
   private static final Logger LOG = LoggerFactory.getLogger("QTestUtil");
   private static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
@@ -184,16 +183,6 @@ public class QTestUtil {
   private final String cleanupScript;
   private boolean useHBaseMetastore = false;
 
-  // Parameters which help tracking cached data generation.
-  private final String driverName;
-  private Path cachedDataPath;
-  private String metaStorePathString;
-  private Path metaStorePath;
-  private FileSystem localFs;
-  private boolean attemptingCacheUsage;
-
-  private boolean dbEtcSetup = false;
-
   public interface SuiteAddTestFunctor {
     public void addTestToSuite(TestSuite suite, Object setup, String tName);
   }
@@ -389,34 +378,11 @@ public class QTestUtil {
   }
 
   public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
-                   String confDir, String hadoopVer, String initScript, String cleanupScript,
-                   boolean useHBaseMetastore, boolean withLlapIo) throws Exception {
-    // For now, to avoid changing multiple test templates, a null driver name avoids
-    // data generation optimizations.
-    this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript,
-        useHBaseMetastore, withLlapIo, null);
-  }
-
-  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
       String confDir, String hadoopVer, String initScript, String cleanupScript,
-      boolean useHBaseMetastore, boolean withLlapIo, String driverName)
+      boolean useHBaseMetastore, boolean withLlapIo)
     throws Exception {
-    this.attemptingCacheUsage = (StringUtils.isEmpty(TEST_BUILD_DIR) ||
-        StringUtils.isEmpty(driverName) || useHBaseMetastore) ? false : true;
-    this.driverName = driverName;
     this.outDir = outDir;
     this.logDir = logDir;
-    LOG.info("Creating QTestUtil with settings: "
-        + "driverName=" + driverName
-        + ", attemptingCacheUsage=" + attemptingCacheUsage
-        + ", test.build.dir=" + System.getProperty("test.build.dir")
-        + ", useHbaseMetaStore=" + useHBaseMetastore
-        + ", withLlapIo=" + withLlapIo
-        + ", confDir=" + confDir
-        + ", outDir=" + outDir
-        + ", logDir=" + logDir
-        + ", initScript=" + initScript
-        + ", cleanupScript=" + cleanupScript);
     this.useHBaseMetastore = useHBaseMetastore;
 
     if (confDir != null && !confDir.isEmpty()) {
@@ -505,7 +471,6 @@ public class QTestUtil {
     if (scriptsDir == null) {
       scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
     }
-    LOG.info("Using DataDir=" + dataDir + ", ScriptsDir=" + scriptsDir);
 
     this.initScript = scriptsDir + File.separator + initScript;
     this.cleanupScript = scriptsDir + File.separator + cleanupScript;
@@ -867,17 +832,6 @@ public class QTestUtil {
       return;
     }
 
-    if (!attemptingCacheUsage) {
-      cleanupNonCacheUsage();
-    } else {
-      cleanupCacheUsage();
-    }
-
-    FunctionRegistry.unregisterTemporaryUDF("test_udaf");
-    FunctionRegistry.unregisterTemporaryUDF("test_error");
-  }
-
-  private void cleanupNonCacheUsage() throws Exception {
     clearTablesCreatedDuringTests();
     clearKeysCreatedInTests();
 
@@ -895,42 +849,21 @@ public class QTestUtil {
       LOG.info("No cleanup script detected. Skipping.");
     }
 
-    cleanupWarehouseDir();
-  }
-
-  private void cleanupCacheUsage() throws IOException {
-    // Remove the Warehouse and metastore directories completely.
-    // Also close the current db, since files are going to come in to replace it soon.
-    Preconditions.checkState(attemptingCacheUsage);
-    Preconditions.checkNotNull(metaStorePath);
-    Preconditions.checkNotNull(localFs);
-    Hive.closeCurrent();
-    cleanupMetastoreDir();
-    cleanupWarehouseDir();
-  }
-
-  private void cleanupWarehouseDir() throws IOException {
     // delete any contents in the warehouse dir
     Path p = new Path(testWarehouse);
     FileSystem fs = p.getFileSystem(conf);
 
     try {
-      FileStatus[] ls = fs.listStatus(p);
-      for (int i = 0; (ls != null) && (i < ls.length); i++) {
+      FileStatus [] ls = fs.listStatus(p);
+      for (int i=0; (ls != null) && (i<ls.length); i++) {
         fs.delete(ls[i].getPath(), true);
       }
     } catch (FileNotFoundException e) {
       // Best effort
     }
-  }
 
-  private void cleanupMetastoreDir() throws IOException {
-    try {
-      LOG.info("Cleaning up metastore Dir: {}", metaStorePath);
-      localFs.delete(metaStorePath, true);
-    } catch (FileNotFoundException e) {
-      // Best effort
-    }
+    FunctionRegistry.unregisterTemporaryUDF("test_udaf");
+    FunctionRegistry.unregisterTemporaryUDF("test_error");
   }
 
   protected void runCreateTableCmd(String createTableCmd) throws Exception {
@@ -960,10 +893,6 @@ public class QTestUtil {
   }
 
   public void createSources(String tname) throws Exception {
-    createSources(tname, false);
-  }
-
-  public void createSources(String tname, boolean forceCreate) throws Exception {
     boolean canReuseSession = (tname == null) || !qNoSessionReuseQuerySet.contains(tname);
     if(!isSessionStateStarted) {
       startSessionState(canReuseSession);
@@ -972,173 +901,34 @@ public class QTestUtil {
     if(cliDriver == null) {
       cliDriver = new CliDriver();
     }
-
+    cliDriver.processLine("set test.data.dir=" + testFiles + ";");
     File scriptFile = new File(this.initScript);
     if (!scriptFile.isFile()) {
       LOG.info("No init script detected. Skipping");
-      if (attemptingCacheUsage) {
-        setupDbsEtc(true, true);
-      }
       return;
     }
+    conf.setBoolean("hive.test.init.phase", true);
 
-    if (!attemptingCacheUsage || forceCreate) {
-      LOG.info("Creating sources without data caching. attemptingCacheUsage={}, forceCreate={}",
-          attemptingCacheUsage, forceCreate);
-      cliDriver.processLine("set test.data.dir=" + testFiles + ";");
-      conf.setBoolean("hive.test.init.phase", true);
-      createSourcesNonCached(scriptFile);
-    } else {
-      LOG.info("Creating sources with data caching");
-      createSourcesCached(scriptFile);
-    }
-
-    conf.setBoolean("hive.test.init.phase", false);
-  }
-
-  private void createSourcesNonCached(File scriptFile) throws IOException {
     String initCommands = readEntireFileIntoString(scriptFile);
     LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
 
     cliDriver.processLine(initCommands);
-  }
-
-  private void createSourcesCached(File scriptFile) throws IOException, HiveException {
 
-    // First check if the cache already exists. If it does just copy it over.
-    Path cachedWarehousePath = new Path(cachedDataPath, "warehouse");
-    Path cachedMetaStorePtah = new Path(cachedDataPath, "metastore");
-    if (localFs.exists(cachedDataPath)) {
-      if (localFs.exists(cachedWarehousePath) && localFs.exists(cachedMetaStorePtah)) {
-        LOG.info("Cached data found in {}. Attempting to use it", cachedDataPath);
-        // Data is alredy cached
-        // Copy the files over to where they should be
-        Path warehousePath = new Path(testWarehouse);
-        FileSystem warehouseFs = warehousePath.getFileSystem(conf);
-        try {
-          warehouseFs.delete(warehousePath, false);
-        } catch (FileNotFoundException e) {
-          // Does not matter if it does not exist.
-        }
-        warehouseFs.copyFromLocalFile(false, cachedWarehousePath, warehousePath);
-
-        try {
-          localFs.delete(metaStorePath, false);
-        } catch (IOException e) {
-          // Does not matter if it does not exist.
-        }
-        localFs.copyFromLocalFile(false, cachedMetaStorePtah, metaStorePath);
-        setupDbsEtc(true, false);
-        cliDriver.processLine("set test.data.dir=" + testFiles + ";");
-        conf.setBoolean("hive.test.init.phase", true);
-
-        return;
-      } else {
-        // Something is missing. Cleanup. Re-generate and cache
-        LOG.info("Partial or no cached data found at {}. Cache will be created", cachedDataPath);
-        localFs.delete(cachedDataPath, true);
-      }
-    } else {
-      LOG.info("No cached data found at {}. Cache will be created", cachedDataPath);
-      // No caching. Re-generate the data and cache it.
-    }
-
-    // Generate and cache the data
-    setupDbsEtc(true, true);
-    cliDriver.processLine("set test.data.dir=" + testFiles + ";");
-    conf.setBoolean("hive.test.init.phase", true);
-    createSourcesNonCached(scriptFile);
-
-    // Close the DB so that contents can be copied out safely.
-    Hive.closeCurrent();
-
-    // Cache the sources
-    localFs.mkdirs(cachedDataPath);
-
-    Path warehousePath = new Path(testWarehouse);
-    FileSystem warehouseFs = warehousePath.getFileSystem(conf);
-
-    warehouseFs.copyToLocalFile(false, warehousePath, cachedWarehousePath, true);
-    localFs.copyToLocalFile(false, metaStorePath, cachedMetaStorePtah, true);
-
-    // Re-open the DB etc.
-    setupDbsEtc(true, false);
-  }
-
-  private static final Pattern metaStoreUriPattern =
-      Pattern.compile("derby.*?databaseName=(.*?)(;|$)");
-
-  private String getDerbyDbPath(String jdbcConnectString) {
-    if (StringUtils.isEmpty(jdbcConnectString)) {
-      return null;
-    }
-    Matcher matcher = metaStoreUriPattern.matcher(jdbcConnectString);
-    if (matcher.find()) {
-      return matcher.group(1);
-    } else {
-      return null;
-    }
+    conf.setBoolean("hive.test.init.phase", false);
   }
 
   public void init() throws Exception {
-    LOG.info("init");
-    testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
-    LOG.info("TestWarehouseDir set to: [{}]", testWarehouse);
-    if (attemptingCacheUsage) {
-      // The derby path comes from METASTORECONNECTURLKEY. Default ends up being target/junit_metastore_db
-      String metaStoreConnectUrl = conf.getVar(ConfVars.METASTORECONNECTURLKEY);
-      LOG.info("MetastoreConnectUrl: " + metaStoreConnectUrl);
-      metaStorePathString = getDerbyDbPath(metaStoreConnectUrl);
-
-      if (metaStorePathString == null) {
-        LOG.warn(
-            "Disabling attempted cache usage since metastore path cannot be determined from {}",
-            metaStoreConnectUrl);
-        attemptingCacheUsage = false;
-      } else {
-        LOG.info("Metastore url path: " + metaStorePathString);
-        metaStorePath = new Path(metaStorePathString);
-        if (metaStorePath.isAbsolute() && metaStorePathString.split(File.separator).length >= 3) {
-          // Turn this on only if the path is absolute, and is at least 3 deep - since we'll be deleting files later.
-          localFs = FileSystem.getLocal(conf).getRaw();
-          assert(TEST_BUILD_DIR != null);
-          cachedDataPath = new Path(TEST_BUILD_DIR, CACHED_DATA_DIR_NAME);
-          cachedDataPath = new Path(cachedDataPath, driverName);
-          LOG.info("Using cachedDataPath: " + cachedDataPath);
-        } else {
-          LOG.warn(
-              "Disableing attempted cache usage since metastore path may not be absolute, or depth is < 3. MetaStorePath={}",
-              metaStorePathString);
-          metaStorePath = null;
-          attemptingCacheUsage = false;
-        }
 
-      }
-    }
+    testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
     String execEngine = conf.get("hive.execution.engine");
     conf.set("hive.execution.engine", "mr");
     SessionState.start(conf);
     conf.set("hive.execution.engine", execEngine);
-
-    if (!attemptingCacheUsage) {
-      setupDbsEtc(true, true);
-    }
-  }
-
-  private void setupDbsEtc(boolean force, boolean isNewDb) throws HiveException {
-    if (!dbEtcSetup || force) {
-      if (isNewDb) {
-        db = Hive.get(conf);
-      } else {
-        db = Hive.getWithFastCheck(conf, false);
-      }
-      LOG.info("Obtained db");
-      drv = new Driver(conf);
-      drv.init();
-      pd = new ParseDriver();
-      sem = new SemanticAnalyzer(conf);
-      dbEtcSetup = true;
-    }
+    db = Hive.get(conf);
+    drv = new Driver(conf);
+    drv.init();
+    pd = new ParseDriver();
+    sem = new SemanticAnalyzer(conf);
   }
 
   public void init(String tname) throws Exception {
@@ -1154,9 +944,8 @@ public class QTestUtil {
   public String cliInit(String tname, boolean recreate) throws Exception {
     if (recreate) {
       cleanUp(tname);
-      createSources(tname, true);
+      createSources(tname);
     }
-    setupDbsEtc(false, true);
 
     HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
     "org.apache.hadoop.hive.ql.security.DummyAuthenticator");

http://git-wip-us.apache.org/repos/asf/hive/blob/e3e43c6d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 08ef998..77cfaeb 100644
--- a/pom.xml
+++ b/pom.xml
@@ -75,7 +75,6 @@
     <test.hive.hadoop.classpath>${maven.test.classpath}</test.hive.hadoop.classpath>
     <test.log4j.scheme>file://</test.log4j.scheme>
     <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
-    <test.build.dir>${project.build.directory}</test.build.dir>
     <test.tmp.dir.uri>file://${test.tmp.dir}</test.tmp.dir.uri>
     <test.warehouse.dir>${project.build.directory}/warehouse</test.warehouse.dir>
     <test.warehouse.scheme>pfile://</test.warehouse.scheme>
@@ -1027,7 +1026,6 @@
             <test.data.dir>${basedir}/${hive.path.to.root}/data/files</test.data.dir>
             <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
             <test.tmp.dir.uri>${test.tmp.dir.uri}</test.tmp.dir.uri>
-            <test.build.dir>${test.build.dir}</test.build.dir>
             <test.dfs.mkdir>${test.dfs.mkdir}</test.dfs.mkdir>
             <test.output.overwrite>${test.output.overwrite}</test.output.overwrite>
             <test.warehouse.dir>${test.warehouse.scheme}${test.warehouse.dir}</test.warehouse.dir>

http://git-wip-us.apache.org/repos/asf/hive/blob/e3e43c6d/ql/src/test/templates/TestCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestCliDriver.vm b/ql/src/test/templates/TestCliDriver.vm
index 1961c75..72cfab9 100644
--- a/ql/src/test/templates/TestCliDriver.vm
+++ b/ql/src/test/templates/TestCliDriver.vm
@@ -17,34 +17,23 @@
  */
 package org.apache.hadoop.hive.cli;
 
-import com.google.common.base.Stopwatch;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.util.concurrent.TimeUnit;
-
 public class $className {
 
-  private static final Logger LOG = LoggerFactory.getLogger(${className}.class);
-
   private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
   private static QTestUtil qt;
 
   static {
 
-    Stopwatch stopwatch = new Stopwatch().start();
-    String message = "Starting TestCliDriver run at " + System.currentTimeMillis();
-    LOG.info(message);
-    System.err.println(message);
     MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
     String hiveConfDir = "$hiveConfDir";
     String initScript = "$initScript";
@@ -55,29 +44,15 @@ public class $className {
       if (!hiveConfDir.isEmpty()) {
         hiveConfDir = HIVE_ROOT + hiveConfDir;
       }
-      // TODO Is ZK startup required for TestCliDriver
-      // TODO Is LlapIo enabled required for TestCliDriver
       qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
-      hiveConfDir, hadoopVer, initScript, cleanupScript, useHBaseMetastore, true, "$className");
-      message = "QTestUtil instance created. ElapsedTimeSinceStart=" + stopwatch.elapsed(
-          TimeUnit.MILLISECONDS);
-      LOG.info(message);
-      System.err.println(message);
+      hiveConfDir, hadoopVer, initScript, cleanupScript, useHBaseMetastore, true);
 
       // do a one time initialization
       qt.cleanUp();
-      message = "Initialization cleanup done. ElapsedTimeSinceStart=" + stopwatch.elapsed(TimeUnit.MILLISECONDS);
-      LOG.info(message);
-      System.err.println(message);
-
       qt.createSources();
-      message = "Initialization createSources done. ElapsedTimeSinceStart=" + stopwatch.elapsed(TimeUnit.MILLISECONDS);
-      LOG.info(message);
-      System.err.println(message);
 
     } catch (Exception e) {
-      System.err.println("Exception: " + e.getMessage() + ". ElapsedTimeSinceStart="
-          + stopwatch.elapsed(TimeUnit.MILLISECONDS));
+      System.err.println("Exception: " + e.getMessage());
       e.printStackTrace();
       System.err.flush();
       fail("Unexpected exception in static initialization: "+e.getMessage());
@@ -87,7 +62,6 @@ public class $className {
   @Before
   public void setUp() {
     try {
-      // TODO This restarts ZK for each test. Is that requried ?
       qt.clearTestSideEffects();
     } catch (Exception e) {
       System.err.println("Exception: " + e.getMessage());
@@ -139,9 +113,7 @@ public class $className {
   private void runTest(String tname, String fname, String fpath) throws Exception {
     long startTime = System.currentTimeMillis();
     try {
-      String message = "Begin query: " + fname + ", startTime=" + startTime;
-      System.err.println(message);
-      LOG.info(message);
+      System.err.println("Begin query: " + fname);
 
       qt.addFile(fpath);
 
@@ -164,9 +136,7 @@ public class $className {
     }
 
     long elapsedTime = System.currentTimeMillis() - startTime;
-    String message = "Done query: " + fname + " elapsedTime=" + elapsedTime/1000 + "s";
-    System.err.println(message);
-    LOG.info(message);
+    System.err.println("Done query: " + fname + " elapsedTime=" + elapsedTime/1000 + "s");
     assertTrue("Test passed", true);
   }
 }


[04/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out
index a628aae..16aa452 100644
--- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning.q.out
@@ -62,10 +62,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
@@ -213,16 +213,20 @@ STAGE PLANS:
                       expressions: ds (type: string)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
 
   Stage: Stage-1
     Spark
@@ -237,11 +241,15 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -251,19 +259,23 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -336,11 +348,15 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -350,19 +366,23 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -457,39 +477,47 @@ STAGE PLANS:
                       expressions: ds (type: string)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
         Map 8 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: hr (type: string)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: hr
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: hr
+                            Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
 
   Stage: Stage-1
     Spark
@@ -504,12 +532,16 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: hr (type: string)
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -519,39 +551,47 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: hr (type: string)
-                      sort order: +
-                      Map-reduce partition columns: hr (type: string)
+                    Select Operator
+                      expressions: hr (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
-                outputColumnNames: _col3
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col3 (type: string)
+                  key expressions: _col1 (type: string)
                   sort order: +
-                  Map-reduce partition columns: _col3 (type: string)
+                  Map-reduce partition columns: _col1 (type: string)
                   Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
@@ -559,8 +599,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col3 (type: string)
-                  1 hr (type: string)
+                  0 _col1 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -640,12 +680,16 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: (ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: hr (type: string)
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -655,39 +699,47 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: hr (type: string)
-                      sort order: +
-                      Map-reduce partition columns: hr (type: string)
+                    Select Operator
+                      expressions: hr (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
-                outputColumnNames: _col3
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col3 (type: string)
+                  key expressions: _col1 (type: string)
                   sort order: +
-                  Map-reduce partition columns: _col3 (type: string)
+                  Map-reduce partition columns: _col1 (type: string)
                   Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
@@ -695,8 +747,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col3 (type: string)
-                  1 hr (type: string)
+                  0 _col1 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -782,48 +834,56 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: ds (type: string)
-                      outputColumnNames: _col0
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: hr (type: string)
-                      outputColumnNames: _col0
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col2 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: hr
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: hr
+                            Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
 
   Stage: Stage-1
     Spark
@@ -837,33 +897,41 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string), hr (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: ds (type: string), hr (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string), hr (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: ds (type: string), hr (type: string)
+                    Select Operator
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col2 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col2 (type: string)
+                        Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string), hr (type: string)
-                  1 ds (type: string), hr (type: string)
+                  0 _col0 (type: string), _col1 (type: string)
+                  1 _col0 (type: string), _col2 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -936,33 +1004,41 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: (ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string), hr (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: ds (type: string), hr (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string), hr (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: ds (type: string), hr (type: string)
+                    Select Operator
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col2 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col2 (type: string)
+                        Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string), hr (type: string)
-                  1 ds (type: string), hr (type: string)
+                  0 _col0 (type: string), _col1 (type: string)
+                  1 _col0 (type: string), _col2 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1053,16 +1129,20 @@ STAGE PLANS:
                       expressions: ds (type: string)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
 
   Stage: Stage-1
     Spark
@@ -1077,11 +1157,15 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1091,19 +1175,23 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1176,11 +1264,15 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1190,19 +1282,23 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 21 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1282,25 +1378,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+                      expressions: hr (type: double)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
+                      Select Operator
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: UDFToDouble(hr)
+                        Group By Operator
+                          keys: _col0 (type: double)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: UDFToDouble(hr)
+                            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
 
   Stage: Stage-1
     Spark
@@ -1313,39 +1413,44 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: UDFToDouble(hr) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: UDFToDouble(hr) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToDouble(hr) (type: double)
+                      key expressions: UDFToDouble(_col0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: UDFToDouble(hr) (type: double)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      sort order: +
-                      Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                        sort order: +
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToDouble(hr) (type: double)
-                  1 UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 UDFToDouble(_col0) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1413,25 +1518,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: hr (type: double)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: (hr * 2)
+                        Group By Operator
+                          keys: _col0 (type: double)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: (UDFToDouble(hr) * 2.0)
+                            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
 
   Stage: Stage-1
     Spark
@@ -1444,39 +1553,44 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: (hr * 2) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (hr * 2) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (hr * 2) (type: double)
+                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (hr * 2) (type: double)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: hr (type: double)
-                      sort order: +
-                      Map-reduce partition columns: hr (type: double)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: double)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: double)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (hr * 2) (type: double)
-                  1 hr (type: double)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  1 _col0 (type: double)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1546,39 +1660,44 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: UDFToDouble(hr) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: UDFToDouble(hr) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToDouble(hr) (type: double)
+                      key expressions: UDFToDouble(_col0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: UDFToDouble(hr) (type: double)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      sort order: +
-                      Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                        sort order: +
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToDouble(hr) (type: double)
-                  1 UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 UDFToDouble(_col0) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1648,39 +1767,44 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: (hr * 2) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (hr * 2) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (hr * 2) (type: double)
+                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (hr * 2) (type: double)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: hr (type: double)
-                      sort order: +
-                      Map-reduce partition columns: hr (type: double)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: double)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: double)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (hr * 2) (type: double)
-                  1 hr (type: double)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  1 _col0 (type: double)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1761,25 +1885,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: UDFToString(hr) (type: string)
+                      expressions: hr (type: double)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: UDFToString(_col0) (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: UDFToString((hr * 2))
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: UDFToString((UDFToDouble(hr) * 2.0))
+                            Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
 
   Stage: Stage-1
     Spark
@@ -1792,39 +1920,44 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: UDFToString((hr * 2)) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: UDFToString((hr * 2)) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToString((hr * 2)) (type: string)
+                      key expressions: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
                       sort order: +
-                      Map-reduce partition columns: UDFToString((hr * 2)) (type: string)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToString(hr) (type: string)
-                      sort order: +
-                      Map-reduce partition columns: UDFToString(hr) (type: string)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: UDFToString(_col0) (type: string)
+                        sort order: +
+                        Map-reduce partition columns: UDFToString(_col0) (type: string)
+                        Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToString((hr * 2)) (type: string)
-                  1 UDFToString(hr) (type: string)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                  1 UDFToString(_col0) (type: string)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1887,7 +2020,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
 PREHOOK: type: QUERY
@@ -1895,15 +2027,64 @@ POSTHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
   Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
+  Stage: Stage-2
+    Spark
+      Edges:
+        Reducer 7 <- Map 6 (GROUP, 2)
+#### A masked pattern was here ####
+      Vertices:
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: '2008-04-08' (type: string)
+                    outputColumnNames: ds
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: ds (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+        Reducer 7 
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Spark Partition Pruning Sink Operator
+                      partition key expr: ds
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      target column name: ds
+                      target work: Map 1
+
   Stage: Stage-1
     Spark
       Edges:
         Reducer 5 <- Map 4 (GROUP, 2)
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Reducer 5 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 5 (PARTITION-LEVEL SORT, 2)
         Reducer 3 <- Reducer 2 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:
@@ -1911,13 +2092,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: (ds = '2008-04-08') (type: boolean)
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: ds is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1926,10 +2111,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1944,9 +2129,9 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 
-                  1 
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1977,11 +2162,11 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 
   Stage: Stage-0
     Fetch Operator
@@ -1989,18 +2174,21 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
@@ -2016,7 +2204,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join JOIN[4][tables = [srcpart, srcpart_date_hour]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: -- non-equi join
 EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
@@ -2040,23 +2228,31 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: ds (type: string), hr (type: string)
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: string), _col1 (type: string)
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((date = '2008-04-08') and (hour = 11)) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((date = '2008-04-08') and (hour = 11)) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      sort order: 
+                    Select Operator
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: ds (type: string), hr (type: string)
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: string), _col2 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -2065,10 +2261,10 @@ STAGE PLANS:
                 keys:
                   0 
                   1 
-                outputColumnNames: _col2, _col3, _col7, _col9
+                outputColumnNames: _col0, _col1, _col2, _col4
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
-                  predicate: ((_col2 = _col7) or (_col3 = _col9)) (type: boolean)
+                  predicate: ((_col0 = _col2) or (_col1 = _col4)) (type: boolean)
                   Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
@@ -2102,7 +2298,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[4][tables = [srcpart, srcpart_date_hour]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -2142,48 +2338,56 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: ds (type: string)
-                      outputColumnNames: _col0
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: hr (type: string)
-                      outputColumnNames: _col0
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col2 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: hr
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: hr
+                            Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
 
   Stage: Stage-1
     Spark
@@ -2197,49 +2401,51 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string), hr (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: ds (type: string), hr (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string), hr (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: ds (type: string), hr (type: string)
+                    Select Operator
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col2 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col2 (type: string)
+                        Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string), hr (type: string)
-                  1 ds (type: string), hr (type: string)
-                outputColumnNames: _col2, _col3, _col7, _col9
+                  0 _col0 (type: string), _col1 (type: string)
+                  1 _col0 (type: string), _col2 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-                Filter Operator
-                  predicate: ((_col2 = _col7) and (_col3 = _col9)) (type: boolean)
-                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: bigint)
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col0 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
               Group By Operator
@@ -2287,62 +2493,101 @@ POSTHOOK: query: -- left join
 EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpa

<TRUNCATED>

[13/58] [abbrv] hive git commit: HIVE-13405 : Fix Connection Leak in OrcRawRecordMerger (Thomas Poepping via Prasanth J)

Posted by jd...@apache.org.
HIVE-13405 : Fix Connection Leak in OrcRawRecordMerger (Thomas Poepping via Prasanth J)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3d755444
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3d755444
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3d755444

Branch: refs/heads/llap
Commit: 3d75544479db8a4defe0d5a53b31307c73bad550
Parents: e98f7ac
Author: Thomas Poepping <po...@amazon.com>
Authored: Mon Apr 4 13:47:00 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Apr 9 17:17:13 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java     | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3d755444/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
index f495be2..1fce282 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
@@ -546,13 +546,11 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
                                          Path deltaFile) throws IOException {
     Path lengths = OrcRecordUpdater.getSideFile(deltaFile);
     long result = Long.MAX_VALUE;
-    try {
-      FSDataInputStream stream = fs.open(lengths);
+    try (FSDataInputStream stream = fs.open(lengths)) {
       result = -1;
       while (stream.available() > 0) {
         result = stream.readLong();
       }
-      stream.close();
       return result;
     } catch (IOException ioe) {
       return result;


[39/58] [abbrv] hive git commit: HIVE-12159: Create vectorized readers for the complex types (Owen O'Malley, reviewed by Matt McCline)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorOrcFile.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorOrcFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorOrcFile.java
index a82d672..460c925 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorOrcFile.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorOrcFile.java
@@ -32,40 +32,14 @@ import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.MapColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.StructColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
-import org.apache.hadoop.hive.ql.exec.vector.TimestampUtils;
 import org.apache.hadoop.hive.ql.exec.vector.UnionColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
-import org.apache.hadoop.hive.serde2.io.ByteWritable;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
-import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
-import org.apache.hadoop.hive.serde2.io.ShortWritable;
-import org.apache.hadoop.hive.serde2.io.TimestampWritable;
-import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
-import org.apache.hadoop.hive.serde2.objectinspector.StructField;
-import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector;
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector;
-import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.FloatWritable;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hive.common.util.HiveTestUtils;
 import org.apache.orc.BinaryColumnStatistics;
@@ -117,6 +91,10 @@ public class TestVectorOrcFile {
   public static class InnerStruct {
     int int1;
     Text string1 = new Text();
+    InnerStruct(int int1, Text string1) {
+      this.int1 = int1;
+      this.string1.set(string1);
+    }
     InnerStruct(int int1, String string1) {
       this.int1 = int1;
       this.string1.set(string1);
@@ -136,50 +114,6 @@ public class TestVectorOrcFile {
     }
   }
 
-  public static class BigRow {
-    Boolean boolean1;
-    Byte byte1;
-    Short short1;
-    Integer int1;
-    Long long1;
-    Float float1;
-    Double double1;
-    BytesWritable bytes1;
-    Text string1;
-    MiddleStruct middle;
-    List<InnerStruct> list = new ArrayList<InnerStruct>();
-    Map<Text, InnerStruct> map = new HashMap<Text, InnerStruct>();
-
-    BigRow(Boolean b1, Byte b2, Short s1, Integer i1, Long l1, Float f1,
-           Double d1,
-           BytesWritable b3, String s2, MiddleStruct m1,
-           List<InnerStruct> l2, Map<String, InnerStruct> m2) {
-      this.boolean1 = b1;
-      this.byte1 = b2;
-      this.short1 = s1;
-      this.int1 = i1;
-      this.long1 = l1;
-      this.float1 = f1;
-      this.double1 = d1;
-      this.bytes1 = b3;
-      if (s2 == null) {
-        this.string1 = null;
-      } else {
-        this.string1 = new Text(s2);
-      }
-      this.middle = m1;
-      this.list = l2;
-      if (m2 != null) {
-        this.map = new HashMap<Text, InnerStruct>();
-        for (Map.Entry<String, InnerStruct> item : m2.entrySet()) {
-          this.map.put(new Text(item.getKey()), item.getValue());
-        }
-      } else {
-        this.map = null;
-      }
-    }
-  }
-
   private static InnerStruct inner(int i, String s) {
     return new InnerStruct(i, s);
   }
@@ -301,206 +235,115 @@ public class TestVectorOrcFile {
     assertEquals("count: 7500 hasNull: true min: bye max: hi sum: 0", stats[9].toString());
 
     // check the inspectors
-    StructObjectInspector readerInspector = (StructObjectInspector) reader
-        .getObjectInspector();
-    assertEquals(ObjectInspector.Category.STRUCT, readerInspector.getCategory());
+    TypeDescription schema = reader.getSchema();
+    assertEquals(TypeDescription.Category.STRUCT, schema.getCategory());
     assertEquals("struct<boolean1:boolean,byte1:tinyint,short1:smallint,"
         + "int1:int,long1:bigint,float1:float,double1:double,bytes1:"
         + "binary,string1:string,middle:struct<list:array<struct<int1:int,"
         + "string1:string>>>,list:array<struct<int1:int,string1:string>>,"
         + "map:map<string,struct<int1:int,string1:string>>,ts:timestamp,"
-        + "decimal1:decimal(38,18)>", readerInspector.getTypeName());
-    List<? extends StructField> fields = readerInspector
-        .getAllStructFieldRefs();
-    BooleanObjectInspector bo = (BooleanObjectInspector) readerInspector
-        .getStructFieldRef("boolean1").getFieldObjectInspector();
-    ByteObjectInspector by = (ByteObjectInspector) readerInspector
-        .getStructFieldRef("byte1").getFieldObjectInspector();
-    ShortObjectInspector sh = (ShortObjectInspector) readerInspector
-        .getStructFieldRef("short1").getFieldObjectInspector();
-    IntObjectInspector in = (IntObjectInspector) readerInspector
-        .getStructFieldRef("int1").getFieldObjectInspector();
-    LongObjectInspector lo = (LongObjectInspector) readerInspector
-        .getStructFieldRef("long1").getFieldObjectInspector();
-    FloatObjectInspector fl = (FloatObjectInspector) readerInspector
-        .getStructFieldRef("float1").getFieldObjectInspector();
-    DoubleObjectInspector dbl = (DoubleObjectInspector) readerInspector
-        .getStructFieldRef("double1").getFieldObjectInspector();
-    BinaryObjectInspector bi = (BinaryObjectInspector) readerInspector
-        .getStructFieldRef("bytes1").getFieldObjectInspector();
-    StringObjectInspector st = (StringObjectInspector) readerInspector
-        .getStructFieldRef("string1").getFieldObjectInspector();
-    StructObjectInspector mid = (StructObjectInspector) readerInspector
-        .getStructFieldRef("middle").getFieldObjectInspector();
-    List<? extends StructField> midFields = mid.getAllStructFieldRefs();
-    ListObjectInspector midli = (ListObjectInspector) midFields.get(0)
-        .getFieldObjectInspector();
-    StructObjectInspector inner = (StructObjectInspector) midli
-        .getListElementObjectInspector();
-    List<? extends StructField> inFields = inner.getAllStructFieldRefs();
-    ListObjectInspector li = (ListObjectInspector) readerInspector
-        .getStructFieldRef("list").getFieldObjectInspector();
-    MapObjectInspector ma = (MapObjectInspector) readerInspector
-        .getStructFieldRef("map").getFieldObjectInspector();
-    TimestampObjectInspector tso = (TimestampObjectInspector) readerInspector
-        .getStructFieldRef("ts").getFieldObjectInspector();
-    HiveDecimalObjectInspector dco = (HiveDecimalObjectInspector) readerInspector
-        .getStructFieldRef("decimal1").getFieldObjectInspector();
-    StringObjectInspector mk = (StringObjectInspector) ma
-        .getMapKeyObjectInspector();
+        + "decimal1:decimal(38,10)>", schema.toString());
+    VectorizedRowBatch batch = schema.createRowBatch();
+
     RecordReader rows = reader.rows();
-    Object row = rows.next(null);
-    assertNotNull(row);
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1024, batch.size);
+
     // check the contents of the first row
-    assertEquals(false,
-        bo.get(readerInspector.getStructFieldData(row, fields.get(0))));
-    assertEquals(1,
-        by.get(readerInspector.getStructFieldData(row, fields.get(1))));
-    assertEquals(1024,
-        sh.get(readerInspector.getStructFieldData(row, fields.get(2))));
-    assertEquals(65536,
-        in.get(readerInspector.getStructFieldData(row, fields.get(3))));
-    assertEquals(Long.MAX_VALUE,
-        lo.get(readerInspector.getStructFieldData(row, fields.get(4))));
-    assertEquals(1.0,
-        fl.get(readerInspector.getStructFieldData(row, fields.get(5))), 0.00001);
-    assertEquals(-15.0,
-        dbl.get(readerInspector.getStructFieldData(row, fields.get(6))),
-        0.00001);
-    assertEquals(bytes(0, 1, 2, 3, 4),
-        bi.getPrimitiveWritableObject(readerInspector.getStructFieldData(row,
-            fields.get(7))));
-    assertEquals("hi", st.getPrimitiveJavaObject(readerInspector
-        .getStructFieldData(row, fields.get(8))));
-    List<?> midRow = midli.getList(mid.getStructFieldData(
-        readerInspector.getStructFieldData(row, fields.get(9)),
-        midFields.get(0)));
-    assertNotNull(midRow);
-    assertEquals(2, midRow.size());
-    assertEquals(1,
-        in.get(inner.getStructFieldData(midRow.get(0), inFields.get(0))));
-    assertEquals("bye", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        midRow.get(0), inFields.get(1))));
-    assertEquals(2,
-        in.get(inner.getStructFieldData(midRow.get(1), inFields.get(0))));
-    assertEquals("sigh", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        midRow.get(1), inFields.get(1))));
-    List<?> list = li.getList(readerInspector.getStructFieldData(row,
-        fields.get(10)));
-    assertEquals(2, list.size());
-    assertEquals(3,
-        in.get(inner.getStructFieldData(list.get(0), inFields.get(0))));
-    assertEquals("good", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        list.get(0), inFields.get(1))));
-    assertEquals(4,
-        in.get(inner.getStructFieldData(list.get(1), inFields.get(0))));
-    assertEquals("bad", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        list.get(1), inFields.get(1))));
-    Map<?, ?> map = ma.getMap(readerInspector.getStructFieldData(row,
-        fields.get(11)));
-    assertEquals(0, map.size());
+    assertEquals(false, getBoolean(batch, 0));
+    assertEquals(1, getByte(batch, 0));
+    assertEquals(1024, getShort(batch, 0));
+    assertEquals(65536, getInt(batch, 0));
+    assertEquals(Long.MAX_VALUE, getLong(batch, 0));
+    assertEquals(1.0, getFloat(batch, 0), 0.00001);
+    assertEquals(-15.0, getDouble(batch, 0), 0.00001);
+    assertEquals(bytes(0, 1, 2, 3, 4), getBinary(batch, 0));
+    assertEquals("hi", getText(batch, 0).toString());
+
+    StructColumnVector middle = (StructColumnVector) batch.cols[9];
+    ListColumnVector midList = (ListColumnVector) middle.fields[0];
+    StructColumnVector midListStruct = (StructColumnVector) midList.child;
+    LongColumnVector midListInt = (LongColumnVector) midListStruct.fields[0];
+    BytesColumnVector midListStr = (BytesColumnVector) midListStruct.fields[1];
+    ListColumnVector list = (ListColumnVector) batch.cols[10];
+    StructColumnVector listStruct = (StructColumnVector) list.child;
+    LongColumnVector listInts = (LongColumnVector) listStruct.fields[0];
+    BytesColumnVector listStrs = (BytesColumnVector) listStruct.fields[1];
+    MapColumnVector map = (MapColumnVector) batch.cols[11];
+    BytesColumnVector mapKey = (BytesColumnVector) map.keys;
+    StructColumnVector mapValue = (StructColumnVector) map.values;
+    LongColumnVector mapValueInts = (LongColumnVector) mapValue.fields[0];
+    BytesColumnVector mapValueStrs = (BytesColumnVector) mapValue.fields[1];
+    TimestampColumnVector timestamp = (TimestampColumnVector) batch.cols[12];
+    DecimalColumnVector decs = (DecimalColumnVector) batch.cols[13];
+
+    assertEquals(false, middle.isNull[0]);
+    assertEquals(2, midList.lengths[0]);
+    int start = (int) midList.offsets[0];
+    assertEquals(1, midListInt.vector[start]);
+    assertEquals("bye", midListStr.toString(start));
+    assertEquals(2, midListInt.vector[start + 1]);
+    assertEquals("sigh", midListStr.toString(start + 1));
+
+    assertEquals(2, list.lengths[0]);
+    start = (int) list.offsets[0];
+    assertEquals(3, listInts.vector[start]);
+    assertEquals("good", listStrs.toString(start));
+    assertEquals(4, listInts.vector[start + 1]);
+    assertEquals("bad", listStrs.toString(start + 1));
+    assertEquals(0, map.lengths[0]);
     assertEquals(Timestamp.valueOf("2000-03-12 15:00:00"),
-        tso.getPrimitiveJavaObject(readerInspector.getStructFieldData(row,
-            fields.get(12))));
-    assertEquals(HiveDecimal.create("12345678.6547456"),
-        dco.getPrimitiveJavaObject(readerInspector.getStructFieldData(row,
-            fields.get(13))));
+        timestamp.asScratchTimestamp(0));
+    assertEquals(new HiveDecimalWritable(HiveDecimal.create("12345678.6547456")),
+        decs.vector[0]);
 
-    // check the contents of second row
-    assertEquals(true, rows.hasNext());
+    // check the contents of row 7499
     rows.seekToRow(7499);
-    row = rows.next(null);
-    assertEquals(true,
-        bo.get(readerInspector.getStructFieldData(row, fields.get(0))));
-    assertEquals(100,
-        by.get(readerInspector.getStructFieldData(row, fields.get(1))));
-    assertEquals(2048,
-        sh.get(readerInspector.getStructFieldData(row, fields.get(2))));
-    assertEquals(65536,
-        in.get(readerInspector.getStructFieldData(row, fields.get(3))));
-    assertEquals(Long.MAX_VALUE,
-        lo.get(readerInspector.getStructFieldData(row, fields.get(4))));
-    assertEquals(2.0,
-        fl.get(readerInspector.getStructFieldData(row, fields.get(5))), 0.00001);
-    assertEquals(-5.0,
-        dbl.get(readerInspector.getStructFieldData(row, fields.get(6))),
-        0.00001);
-    assertEquals(bytes(), bi.getPrimitiveWritableObject(readerInspector
-        .getStructFieldData(row, fields.get(7))));
-    assertEquals("bye", st.getPrimitiveJavaObject(readerInspector
-        .getStructFieldData(row, fields.get(8))));
-    midRow = midli.getList(mid.getStructFieldData(
-        readerInspector.getStructFieldData(row, fields.get(9)),
-        midFields.get(0)));
-    assertNotNull(midRow);
-    assertEquals(2, midRow.size());
-    assertEquals(1,
-        in.get(inner.getStructFieldData(midRow.get(0), inFields.get(0))));
-    assertEquals("bye", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        midRow.get(0), inFields.get(1))));
-    assertEquals(2,
-        in.get(inner.getStructFieldData(midRow.get(1), inFields.get(0))));
-    assertEquals("sigh", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        midRow.get(1), inFields.get(1))));
-    list = li.getList(readerInspector.getStructFieldData(row, fields.get(10)));
-    assertEquals(3, list.size());
-    assertEquals(100000000,
-        in.get(inner.getStructFieldData(list.get(0), inFields.get(0))));
-    assertEquals("cat", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        list.get(0), inFields.get(1))));
-    assertEquals(-100000,
-        in.get(inner.getStructFieldData(list.get(1), inFields.get(0))));
-    assertEquals("in", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        list.get(1), inFields.get(1))));
-    assertEquals(1234,
-        in.get(inner.getStructFieldData(list.get(2), inFields.get(0))));
-    assertEquals("hat", st.getPrimitiveJavaObject(inner.getStructFieldData(
-        list.get(2), inFields.get(1))));
-    map = ma.getMap(readerInspector.getStructFieldData(row, fields.get(11)));
-    assertEquals(2, map.size());
-    boolean[] found = new boolean[2];
-    for(Object key : map.keySet()) {
-      String str = mk.getPrimitiveJavaObject(key);
-      if (str.equals("chani")) {
-        assertEquals(false, found[0]);
-        assertEquals(5,
-            in.get(inner.getStructFieldData(map.get(key), inFields.get(0))));
-        assertEquals(str, st.getPrimitiveJavaObject(inner.getStructFieldData(
-            map.get(key), inFields.get(1))));
-        found[0] = true;
-      } else if (str.equals("mauddib")) {
-        assertEquals(false, found[1]);
-        assertEquals(1,
-            in.get(inner.getStructFieldData(map.get(key), inFields.get(0))));
-        assertEquals(str, st.getPrimitiveJavaObject(inner.getStructFieldData(
-            map.get(key), inFields.get(1))));
-        found[1] = true;
-      } else {
-        throw new IllegalArgumentException("Unknown key " + str);
-      }
-    }
-    assertEquals(true, found[0]);
-    assertEquals(true, found[1]);
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(true, getBoolean(batch, 0));
+    assertEquals(100, getByte(batch, 0));
+    assertEquals(2048, getShort(batch, 0));
+    assertEquals(65536, getInt(batch, 0));
+    assertEquals(Long.MAX_VALUE, getLong(batch, 0));
+    assertEquals(2.0, getFloat(batch, 0), 0.00001);
+    assertEquals(-5.0, getDouble(batch, 0), 0.00001);
+    assertEquals(bytes(), getBinary(batch, 0));
+    assertEquals("bye", getText(batch, 0).toString());
+    assertEquals(false, middle.isNull[0]);
+    assertEquals(2, midList.lengths[0]);
+    start = (int) midList.offsets[0];
+    assertEquals(1, midListInt.vector[start]);
+    assertEquals("bye", midListStr.toString(start));
+    assertEquals(2, midListInt.vector[start + 1]);
+    assertEquals("sigh", midListStr.toString(start + 1));
+    assertEquals(3, list.lengths[0]);
+    start = (int) list.offsets[0];
+    assertEquals(100000000, listInts.vector[start]);
+    assertEquals("cat", listStrs.toString(start));
+    assertEquals(-100000, listInts.vector[start + 1]);
+    assertEquals("in", listStrs.toString(start + 1));
+    assertEquals(1234, listInts.vector[start + 2]);
+    assertEquals("hat", listStrs.toString(start + 2));
+    assertEquals(2, map.lengths[0]);
+    start = (int) map.offsets[0];
+    assertEquals("chani", mapKey.toString(start));
+    assertEquals(5, mapValueInts.vector[start]);
+    assertEquals("chani", mapValueStrs.toString(start));
+    assertEquals("mauddib", mapKey.toString(start + 1));
+    assertEquals(1, mapValueInts.vector[start + 1]);
+    assertEquals("mauddib", mapValueStrs.toString(start + 1));
     assertEquals(Timestamp.valueOf("2000-03-12 15:00:01"),
-        tso.getPrimitiveJavaObject(readerInspector.getStructFieldData(row,
-            fields.get(12))));
-    assertEquals(HiveDecimal.create("12345678.6547457"),
-        dco.getPrimitiveJavaObject(readerInspector.getStructFieldData(row,
-            fields.get(13))));
+        timestamp.asScratchTimestamp(0));
+    assertEquals(new HiveDecimalWritable(HiveDecimal.create("12345678.6547457")),
+        decs.vector[0]);
 
     // handle the close up
-    assertEquals(false, rows.hasNext());
+    assertEquals(false, rows.nextBatch(batch));
     rows.close();
   }
 
   @Test
   public void testTimestamp() throws Exception {
-    ObjectInspector inspector;
-    synchronized (TestVectorOrcFile.class) {
-      inspector = ObjectInspectorFactory.getReflectionObjectInspector(Timestamp.class,
-          ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
-    }
-
     TypeDescription schema = TypeDescription.createTimestamp();
     Writer writer = OrcFile.createWriter(testFilePath,
         OrcFile.writerOptions(conf).setSchema(schema).stripeSize(100000)
@@ -533,11 +376,15 @@ public class TestVectorOrcFile {
 
     Reader reader = OrcFile.createReader(testFilePath,
         OrcFile.readerOptions(conf).filesystem(fs));
-    RecordReader rows = reader.rows(null);
+    RecordReader rows = reader.rows();
+    batch = reader.getSchema().createRowBatch();
+    TimestampColumnVector timestamps = (TimestampColumnVector) batch.cols[0];
     int idx = 0;
-    while (rows.hasNext()) {
-      Object row = rows.next(null);
-      assertEquals(tslist.get(idx++).getNanos(), ((TimestampWritable) row).getNanos());
+    while (rows.nextBatch(batch)) {
+      for(int r=0; r < batch.size; ++r) {
+        assertEquals(tslist.get(idx++).getNanos(),
+            timestamps.asScratchTimestamp(r).getNanos());
+      }
     }
     assertEquals(tslist.size(), rows.getRowNumber());
     assertEquals(0, writer.getSchema().getMaximumId());
@@ -608,50 +455,28 @@ public class TestVectorOrcFile {
         stats[2].toString());
 
     // check the inspectors
-    StructObjectInspector readerInspector =
-        (StructObjectInspector) reader.getObjectInspector();
-    assertEquals(ObjectInspector.Category.STRUCT,
-        readerInspector.getCategory());
-    assertEquals("struct<bytes1:binary,string1:string>",
-        readerInspector.getTypeName());
-    List<? extends StructField> fields =
-        readerInspector.getAllStructFieldRefs();
-    BinaryObjectInspector bi = (BinaryObjectInspector) readerInspector.
-        getStructFieldRef("bytes1").getFieldObjectInspector();
-    StringObjectInspector st = (StringObjectInspector) readerInspector.
-        getStructFieldRef("string1").getFieldObjectInspector();
+    batch = reader.getSchema().createRowBatch();
+    BytesColumnVector bytes = (BytesColumnVector) batch.cols[0];
+    BytesColumnVector strs = (BytesColumnVector) batch.cols[1];
     RecordReader rows = reader.rows();
-    Object row = rows.next(null);
-    assertNotNull(row);
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(4, batch.size);
+
     // check the contents of the first row
-    assertEquals(bytes(0,1,2,3,4), bi.getPrimitiveWritableObject(
-        readerInspector.getStructFieldData(row, fields.get(0))));
-    assertEquals("foo", st.getPrimitiveJavaObject(readerInspector.
-        getStructFieldData(row, fields.get(1))));
+    assertEquals(bytes(0,1,2,3,4), getBinary(bytes, 0));
+    assertEquals("foo", strs.toString(0));
 
     // check the contents of second row
-    assertEquals(true, rows.hasNext());
-    row = rows.next(row);
-    assertEquals(bytes(0,1,2,3), bi.getPrimitiveWritableObject(
-        readerInspector.getStructFieldData(row, fields.get(0))));
-    assertEquals("bar", st.getPrimitiveJavaObject(readerInspector.
-        getStructFieldData(row, fields.get(1))));
+    assertEquals(bytes(0,1,2,3), getBinary(bytes, 1));
+    assertEquals("bar", strs.toString(1));
 
     // check the contents of third row
-    assertEquals(true, rows.hasNext());
-    row = rows.next(row);
-    assertEquals(bytes(0,1,2,3,4,5), bi.getPrimitiveWritableObject(
-        readerInspector.getStructFieldData(row, fields.get(0))));
-    assertNull(st.getPrimitiveJavaObject(readerInspector.
-        getStructFieldData(row, fields.get(1))));
+    assertEquals(bytes(0,1,2,3,4,5), getBinary(bytes, 2));
+    assertNull(strs.toString(2));
 
     // check the contents of fourth row
-    assertEquals(true, rows.hasNext());
-    row = rows.next(row);
-    assertNull(bi.getPrimitiveWritableObject(
-        readerInspector.getStructFieldData(row, fields.get(0))));
-    assertEquals("hi", st.getPrimitiveJavaObject(readerInspector.
-        getStructFieldData(row, fields.get(1))));
+    assertNull(getBinary(bytes, 3));
+    assertEquals("hi", strs.toString(3));
 
     // handle the close up
     assertEquals(false, rows.hasNext());
@@ -767,6 +592,19 @@ public class TestVectorOrcFile {
     }
   }
 
+  private static void checkInner(StructColumnVector inner, int rowId,
+                                 int rowInBatch, int i, String value) {
+    assertEquals("row " + rowId, i,
+        ((LongColumnVector) inner.fields[0]).vector[rowInBatch]);
+    if (value != null) {
+      assertEquals("row " + rowId, value,
+          ((BytesColumnVector) inner.fields[1]).toString(rowInBatch));
+    } else {
+      assertEquals("row " + rowId, true, inner.fields[1].isNull[rowInBatch]);
+      assertEquals("row " + rowId, false, inner.fields[1].noNulls);
+    }
+  }
+
   private static void setInnerList(ListColumnVector list, int rowId,
                                    List<InnerStruct> value) {
     if (value != null) {
@@ -787,6 +625,23 @@ public class TestVectorOrcFile {
     }
   }
 
+  private static void checkInnerList(ListColumnVector list, int rowId,
+                                     int rowInBatch, List<InnerStruct> value) {
+    if (value != null) {
+      assertEquals("row " + rowId, value.size(), list.lengths[rowInBatch]);
+      int start = (int) list.offsets[rowInBatch];
+      for (int i = 0; i < list.lengths[rowInBatch]; ++i) {
+        InnerStruct inner = value.get(i);
+        checkInner((StructColumnVector) list.child, rowId, i + start,
+            inner.int1, inner.string1.toString());
+      }
+      list.childCount += value.size();
+    } else {
+      assertEquals("row " + rowId, true, list.isNull[rowInBatch]);
+      assertEquals("row " + rowId, false, list.noNulls);
+    }
+  }
+
   private static void setInnerMap(MapColumnVector map, int rowId,
                                   Map<String, InnerStruct> value) {
     if (value != null) {
@@ -812,6 +667,24 @@ public class TestVectorOrcFile {
     }
   }
 
+  private static void checkInnerMap(MapColumnVector map, int rowId,
+                                    int rowInBatch,
+                                    Map<String, InnerStruct> value) {
+    if (value != null) {
+      assertEquals("row " + rowId, value.size(), map.lengths[rowInBatch]);
+      int offset = (int) map.offsets[rowInBatch];
+      for(int i=0; i < value.size(); ++i) {
+        String key = ((BytesColumnVector) map.keys).toString(offset + i);
+        InnerStruct expected = value.get(key);
+        checkInner((StructColumnVector) map.values, rowId, offset + i,
+            expected.int1, expected.string1.toString());
+      }
+    } else {
+      assertEquals("row " + rowId, true, map.isNull[rowId]);
+      assertEquals("row " + rowId, false, map.noNulls);
+    }
+  }
+
   private static void setMiddleStruct(StructColumnVector middle, int rowId,
                                       MiddleStruct value) {
     if (value != null) {
@@ -822,6 +695,17 @@ public class TestVectorOrcFile {
     }
   }
 
+  private static void checkMiddleStruct(StructColumnVector middle, int rowId,
+                                        int rowInBatch, MiddleStruct value) {
+    if (value != null) {
+      checkInnerList((ListColumnVector) middle.fields[0], rowId, rowInBatch,
+          value.list);
+    } else {
+      assertEquals("row " + rowId, true, middle.isNull[rowInBatch]);
+      assertEquals("row " + rowId, false, middle.noNulls);
+    }
+  }
+
   private static void setBigRow(VectorizedRowBatch batch, int rowId,
                                 Boolean b1, Byte b2, Short s1,
                                 Integer i1, Long l1, Float f1,
@@ -853,6 +737,160 @@ public class TestVectorOrcFile {
     setInnerMap((MapColumnVector) batch.cols[11], rowId, m2);
   }
 
+  private static void checkBigRow(VectorizedRowBatch batch,
+                                  int rowInBatch,
+                                  int rowId,
+                                  boolean b1, byte b2, short s1,
+                                  int i1, long l1, float f1,
+                                  double d1, BytesWritable b3, String s2,
+                                  MiddleStruct m1, List<InnerStruct> l2,
+                                  Map<String, InnerStruct> m2) {
+    assertEquals("row " + rowId, b1, getBoolean(batch, rowInBatch));
+    assertEquals("row " + rowId, b2, getByte(batch, rowInBatch));
+    assertEquals("row " + rowId, s1, getShort(batch, rowInBatch));
+    assertEquals("row " + rowId, i1, getInt(batch, rowInBatch));
+    assertEquals("row " + rowId, l1, getLong(batch, rowInBatch));
+    assertEquals("row " + rowId, f1, getFloat(batch, rowInBatch), 0.0001);
+    assertEquals("row " + rowId, d1, getDouble(batch, rowInBatch), 0.0001);
+    if (b3 != null) {
+      BytesColumnVector bytes = (BytesColumnVector) batch.cols[7];
+      assertEquals("row " + rowId, b3.getLength(), bytes.length[rowInBatch]);
+      for(int i=0; i < b3.getLength(); ++i) {
+        assertEquals("row " + rowId + " byte " + i, b3.getBytes()[i],
+            bytes.vector[rowInBatch][bytes.start[rowInBatch] + i]);
+      }
+    } else {
+      assertEquals("row " + rowId, true, batch.cols[7].isNull[rowInBatch]);
+      assertEquals("row " + rowId, false, batch.cols[7].noNulls);
+    }
+    if (s2 != null) {
+      assertEquals("row " + rowId, s2, getText(batch, rowInBatch).toString());
+    } else {
+      assertEquals("row " + rowId, true, batch.cols[8].isNull[rowInBatch]);
+      assertEquals("row " + rowId, false, batch.cols[8].noNulls);
+    }
+    checkMiddleStruct((StructColumnVector) batch.cols[9], rowId, rowInBatch,
+        m1);
+    checkInnerList((ListColumnVector) batch.cols[10], rowId, rowInBatch, l2);
+    checkInnerMap((MapColumnVector) batch.cols[11], rowId, rowInBatch, m2);
+  }
+
+  private static boolean getBoolean(VectorizedRowBatch batch, int rowId) {
+    return ((LongColumnVector) batch.cols[0]).vector[rowId] != 0;
+  }
+
+  private static byte getByte(VectorizedRowBatch batch, int rowId) {
+    return (byte) ((LongColumnVector) batch.cols[1]).vector[rowId];
+  }
+
+  private static short getShort(VectorizedRowBatch batch, int rowId) {
+    return (short) ((LongColumnVector) batch.cols[2]).vector[rowId];
+  }
+
+  private static int getInt(VectorizedRowBatch batch, int rowId) {
+    return (int) ((LongColumnVector) batch.cols[3]).vector[rowId];
+  }
+
+  private static long getLong(VectorizedRowBatch batch, int rowId) {
+    return ((LongColumnVector) batch.cols[4]).vector[rowId];
+  }
+
+  private static float getFloat(VectorizedRowBatch batch, int rowId) {
+    return (float) ((DoubleColumnVector) batch.cols[5]).vector[rowId];
+  }
+
+  private static double getDouble(VectorizedRowBatch batch, int rowId) {
+    return ((DoubleColumnVector) batch.cols[6]).vector[rowId];
+  }
+
+  private static BytesWritable getBinary(BytesColumnVector column, int rowId) {
+    if (column.isRepeating) {
+      rowId = 0;
+    }
+    if (column.noNulls || !column.isNull[rowId]) {
+      return new BytesWritable(Arrays.copyOfRange(column.vector[rowId],
+          column.start[rowId], column.start[rowId] + column.length[rowId]));
+    } else {
+      return null;
+    }
+  }
+
+  private static BytesWritable getBinary(VectorizedRowBatch batch, int rowId) {
+    return getBinary((BytesColumnVector) batch.cols[7], rowId);
+  }
+
+  private static Text getText(BytesColumnVector vector, int rowId) {
+    if (vector.isRepeating) {
+      rowId = 0;
+    }
+    if (vector.noNulls || !vector.isNull[rowId]) {
+      return new Text(Arrays.copyOfRange(vector.vector[rowId],
+          vector.start[rowId], vector.start[rowId] + vector.length[rowId]));
+    } else {
+      return null;
+    }
+  }
+
+  private static Text getText(VectorizedRowBatch batch, int rowId) {
+    return getText((BytesColumnVector) batch.cols[8], rowId);
+  }
+
+  private static InnerStruct getInner(StructColumnVector vector,
+                                      int rowId) {
+    return new InnerStruct(
+        (int) ((LongColumnVector) vector.fields[0]).vector[rowId],
+        getText((BytesColumnVector) vector.fields[1], rowId));
+  }
+
+  private static List<InnerStruct> getList(ListColumnVector cv,
+                                           int rowId) {
+    if (cv.isRepeating) {
+      rowId = 0;
+    }
+    if (cv.noNulls || !cv.isNull[rowId]) {
+      List<InnerStruct> result =
+          new ArrayList<InnerStruct>((int) cv.lengths[rowId]);
+      for(long i=cv.offsets[rowId];
+          i < cv.offsets[rowId] + cv.lengths[rowId]; ++i) {
+        result.add(getInner((StructColumnVector) cv.child, (int) i));
+      }
+      return result;
+    } else {
+      return null;
+    }
+  }
+
+  private static List<InnerStruct> getMidList(VectorizedRowBatch batch,
+                                              int rowId) {
+    return getList((ListColumnVector) ((StructColumnVector) batch.cols[9])
+        .fields[0], rowId);
+  }
+
+  private static List<InnerStruct> getList(VectorizedRowBatch batch,
+                                           int rowId) {
+    return getList((ListColumnVector) batch.cols[10], rowId);
+  }
+
+  private static Map<Text, InnerStruct> getMap(VectorizedRowBatch batch,
+                                               int rowId) {
+    MapColumnVector cv = (MapColumnVector) batch.cols[11];
+    if (cv.isRepeating) {
+      rowId = 0;
+    }
+    if (cv.noNulls || !cv.isNull[rowId]) {
+      Map<Text, InnerStruct> result =
+          new HashMap<Text, InnerStruct>((int) cv.lengths[rowId]);
+      for(long i=cv.offsets[rowId];
+          i < cv.offsets[rowId] + cv.lengths[rowId]; ++i) {
+        result.put(getText((BytesColumnVector) cv.keys, (int) i),
+            getInner((StructColumnVector) cv.values, (int) i));
+      }
+      return result;
+    } else {
+      return null;
+    }
+  }
+
   private static TypeDescription createInnerSchema() {
     return TypeDescription.createStruct()
         .addField("int1", TypeDescription.createInt())
@@ -981,178 +1019,114 @@ public class TestVectorOrcFile {
 
     assertEquals("count: 2 hasNull: false min: bye max: hi sum: 5", stats[9].toString());
 
-    // check the inspectors
-    StructObjectInspector readerInspector =
-        (StructObjectInspector) reader.getObjectInspector();
-    assertEquals(ObjectInspector.Category.STRUCT,
-        readerInspector.getCategory());
+    // check the schema
+    TypeDescription readerSchema = reader.getSchema();
+    assertEquals(TypeDescription.Category.STRUCT, readerSchema.getCategory());
     assertEquals("struct<boolean1:boolean,byte1:tinyint,short1:smallint,"
         + "int1:int,long1:bigint,float1:float,double1:double,bytes1:"
         + "binary,string1:string,middle:struct<list:array<struct<int1:int,"
         + "string1:string>>>,list:array<struct<int1:int,string1:string>>,"
         + "map:map<string,struct<int1:int,string1:string>>>",
-        readerInspector.getTypeName());
-    List<? extends StructField> fields =
-        readerInspector.getAllStructFieldRefs();
-    BooleanObjectInspector bo = (BooleanObjectInspector) readerInspector.
-        getStructFieldRef("boolean1").getFieldObjectInspector();
-    ByteObjectInspector by = (ByteObjectInspector) readerInspector.
-        getStructFieldRef("byte1").getFieldObjectInspector();
-    ShortObjectInspector sh = (ShortObjectInspector) readerInspector.
-        getStructFieldRef("short1").getFieldObjectInspector();
-    IntObjectInspector in = (IntObjectInspector) readerInspector.
-        getStructFieldRef("int1").getFieldObjectInspector();
-    LongObjectInspector lo = (LongObjectInspector) readerInspector.
-        getStructFieldRef("long1").getFieldObjectInspector();
-    FloatObjectInspector fl = (FloatObjectInspector) readerInspector.
-        getStructFieldRef("float1").getFieldObjectInspector();
-    DoubleObjectInspector dbl = (DoubleObjectInspector) readerInspector.
-        getStructFieldRef("double1").getFieldObjectInspector();
-    BinaryObjectInspector bi = (BinaryObjectInspector) readerInspector.
-        getStructFieldRef("bytes1").getFieldObjectInspector();
-    StringObjectInspector st = (StringObjectInspector) readerInspector.
-        getStructFieldRef("string1").getFieldObjectInspector();
-    StructObjectInspector mid = (StructObjectInspector) readerInspector.
-        getStructFieldRef("middle").getFieldObjectInspector();
-    List<? extends StructField> midFields =
-        mid.getAllStructFieldRefs();
-    ListObjectInspector midli =
-        (ListObjectInspector) midFields.get(0).getFieldObjectInspector();
-    StructObjectInspector inner = (StructObjectInspector)
-        midli.getListElementObjectInspector();
-    List<? extends StructField> inFields = inner.getAllStructFieldRefs();
-    ListObjectInspector li = (ListObjectInspector) readerInspector.
-        getStructFieldRef("list").getFieldObjectInspector();
-    MapObjectInspector ma = (MapObjectInspector) readerInspector.
-        getStructFieldRef("map").getFieldObjectInspector();
-    StringObjectInspector mk = (StringObjectInspector)
-        ma.getMapKeyObjectInspector();
+        readerSchema.toString());
+    List<String> fieldNames = readerSchema.getFieldNames();
+    List<TypeDescription> fieldTypes = readerSchema.getChildren();
+    assertEquals("boolean1", fieldNames.get(0));
+    assertEquals(TypeDescription.Category.BOOLEAN, fieldTypes.get(0).getCategory());
+    assertEquals("byte1", fieldNames.get(1));
+    assertEquals(TypeDescription.Category.BYTE, fieldTypes.get(1).getCategory());
+    assertEquals("short1", fieldNames.get(2));
+    assertEquals(TypeDescription.Category.SHORT, fieldTypes.get(2).getCategory());
+    assertEquals("int1", fieldNames.get(3));
+    assertEquals(TypeDescription.Category.INT, fieldTypes.get(3).getCategory());
+    assertEquals("long1", fieldNames.get(4));
+    assertEquals(TypeDescription.Category.LONG, fieldTypes.get(4).getCategory());
+    assertEquals("float1", fieldNames.get(5));
+    assertEquals(TypeDescription.Category.FLOAT, fieldTypes.get(5).getCategory());
+    assertEquals("double1", fieldNames.get(6));
+    assertEquals(TypeDescription.Category.DOUBLE, fieldTypes.get(6).getCategory());
+    assertEquals("bytes1", fieldNames.get(7));
+    assertEquals(TypeDescription.Category.BINARY, fieldTypes.get(7).getCategory());
+    assertEquals("string1", fieldNames.get(8));
+    assertEquals(TypeDescription.Category.STRING, fieldTypes.get(8).getCategory());
+    assertEquals("middle", fieldNames.get(9));
+    TypeDescription middle = fieldTypes.get(9);
+    assertEquals(TypeDescription.Category.STRUCT, middle.getCategory());
+    TypeDescription midList = middle.getChildren().get(0);
+    assertEquals(TypeDescription.Category.LIST, midList.getCategory());
+    TypeDescription inner = midList.getChildren().get(0);
+    assertEquals(TypeDescription.Category.STRUCT, inner.getCategory());
+    assertEquals("int1", inner.getFieldNames().get(0));
+    assertEquals("string1", inner.getFieldNames().get(1));
+
     RecordReader rows = reader.rows();
-    Object row = rows.next(null);
-    assertNotNull(row);
+    // create a new batch
+    batch = readerSchema.createRowBatch();
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(2, batch.size);
+    assertEquals(false, rows.hasNext());
+
     // check the contents of the first row
-    assertEquals(false,
-        bo.get(readerInspector.getStructFieldData(row, fields.get(0))));
-    assertEquals(1, by.get(readerInspector.getStructFieldData(row,
-        fields.get(1))));
-    assertEquals(1024, sh.get(readerInspector.getStructFieldData(row,
-        fields.get(2))));
-    assertEquals(65536, in.get(readerInspector.getStructFieldData(row,
-        fields.get(3))));
-    assertEquals(Long.MAX_VALUE, lo.get(readerInspector.
-        getStructFieldData(row, fields.get(4))));
-    assertEquals(1.0, fl.get(readerInspector.getStructFieldData(row,
-        fields.get(5))), 0.00001);
-    assertEquals(-15.0, dbl.get(readerInspector.getStructFieldData(row,
-        fields.get(6))), 0.00001);
-    assertEquals(bytes(0,1,2,3,4), bi.getPrimitiveWritableObject(
-        readerInspector.getStructFieldData(row, fields.get(7))));
-    assertEquals("hi", st.getPrimitiveJavaObject(readerInspector.
-        getStructFieldData(row, fields.get(8))));
-    List<?> midRow = midli.getList(mid.getStructFieldData(readerInspector.
-        getStructFieldData(row, fields.get(9)), midFields.get(0)));
+    assertEquals(false, getBoolean(batch, 0));
+    assertEquals(1, getByte(batch, 0));
+    assertEquals(1024, getShort(batch, 0));
+    assertEquals(65536, getInt(batch, 0));
+    assertEquals(Long.MAX_VALUE, getLong(batch, 0));
+    assertEquals(1.0, getFloat(batch, 0), 0.00001);
+    assertEquals(-15.0, getDouble(batch, 0), 0.00001);
+    assertEquals(bytes(0,1,2,3,4), getBinary(batch, 0));
+    assertEquals("hi", getText(batch, 0).toString());
+    List<InnerStruct> midRow = getMidList(batch, 0);
     assertNotNull(midRow);
     assertEquals(2, midRow.size());
-    assertEquals(1, in.get(inner.getStructFieldData(midRow.get(0),
-        inFields.get(0))));
-    assertEquals("bye", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (midRow.get(0), inFields.get(1))));
-    assertEquals(2, in.get(inner.getStructFieldData(midRow.get(1),
-        inFields.get(0))));
-    assertEquals("sigh", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (midRow.get(1), inFields.get(1))));
-    List<?> list = li.getList(readerInspector.getStructFieldData(row,
-        fields.get(10)));
+    assertEquals(1, midRow.get(0).int1);
+    assertEquals("bye", midRow.get(0).string1.toString());
+    assertEquals(2, midRow.get(1).int1);
+    assertEquals("sigh", midRow.get(1).string1.toString());
+    List<InnerStruct> list = getList(batch, 0);
     assertEquals(2, list.size());
-    assertEquals(3, in.get(inner.getStructFieldData(list.get(0),
-        inFields.get(0))));
-    assertEquals("good", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (list.get(0), inFields.get(1))));
-    assertEquals(4, in.get(inner.getStructFieldData(list.get(1),
-        inFields.get(0))));
-    assertEquals("bad", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (list.get(1), inFields.get(1))));
-    Map<?,?> map = ma.getMap(readerInspector.getStructFieldData(row,
-        fields.get(11)));
+    assertEquals(3, list.get(0).int1);
+    assertEquals("good", list.get(0).string1.toString());
+    assertEquals(4, list.get(1).int1);
+    assertEquals("bad", list.get(1).string1.toString());
+    Map<Text, InnerStruct> map = getMap(batch, 0);
     assertEquals(0, map.size());
 
     // check the contents of second row
-    assertEquals(true, rows.hasNext());
-    row = rows.next(row);
-    assertEquals(true,
-        bo.get(readerInspector.getStructFieldData(row, fields.get(0))));
-    assertEquals(100, by.get(readerInspector.getStructFieldData(row,
-        fields.get(1))));
-    assertEquals(2048, sh.get(readerInspector.getStructFieldData(row,
-        fields.get(2))));
-    assertEquals(65536, in.get(readerInspector.getStructFieldData(row,
-        fields.get(3))));
-    assertEquals(Long.MAX_VALUE, lo.get(readerInspector.
-        getStructFieldData(row, fields.get(4))));
-    assertEquals(2.0, fl.get(readerInspector.getStructFieldData(row,
-        fields.get(5))), 0.00001);
-    assertEquals(-5.0, dbl.get(readerInspector.getStructFieldData(row,
-        fields.get(6))), 0.00001);
-    assertEquals(bytes(), bi.getPrimitiveWritableObject(
-        readerInspector.getStructFieldData(row, fields.get(7))));
-    assertEquals("bye", st.getPrimitiveJavaObject(readerInspector.
-        getStructFieldData(row, fields.get(8))));
-    midRow = midli.getList(mid.getStructFieldData(readerInspector.
-        getStructFieldData(row, fields.get(9)), midFields.get(0)));
+    assertEquals(true, getBoolean(batch, 1));
+    assertEquals(100, getByte(batch, 1));
+    assertEquals(2048, getShort(batch, 1));
+    assertEquals(65536, getInt(batch, 1));
+    assertEquals(Long.MAX_VALUE, getLong(batch, 1));
+    assertEquals(2.0, getFloat(batch, 1), 0.00001);
+    assertEquals(-5.0, getDouble(batch, 1), 0.00001);
+    assertEquals(bytes(), getBinary(batch, 1));
+    assertEquals("bye", getText(batch, 1).toString());
+    midRow = getMidList(batch, 1);
     assertNotNull(midRow);
     assertEquals(2, midRow.size());
-    assertEquals(1, in.get(inner.getStructFieldData(midRow.get(0),
-        inFields.get(0))));
-    assertEquals("bye", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (midRow.get(0), inFields.get(1))));
-    assertEquals(2, in.get(inner.getStructFieldData(midRow.get(1),
-        inFields.get(0))));
-    assertEquals("sigh", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (midRow.get(1), inFields.get(1))));
-    list = li.getList(readerInspector.getStructFieldData(row,
-        fields.get(10)));
+    assertEquals(1, midRow.get(0).int1);
+    assertEquals("bye", midRow.get(0).string1.toString());
+    assertEquals(2, midRow.get(1).int1);
+    assertEquals("sigh", midRow.get(1).string1.toString());
+    list = getList(batch, 1);
     assertEquals(3, list.size());
-    assertEquals(100000000, in.get(inner.getStructFieldData(list.get(0),
-        inFields.get(0))));
-    assertEquals("cat", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (list.get(0), inFields.get(1))));
-    assertEquals(-100000, in.get(inner.getStructFieldData(list.get(1),
-        inFields.get(0))));
-    assertEquals("in", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (list.get(1), inFields.get(1))));
-    assertEquals(1234, in.get(inner.getStructFieldData(list.get(2),
-        inFields.get(0))));
-    assertEquals("hat", st.getPrimitiveJavaObject(inner.getStructFieldData
-        (list.get(2), inFields.get(1))));
-    map = ma.getMap(readerInspector.getStructFieldData(row,
-        fields.get(11)));
+    assertEquals(100000000, list.get(0).int1);
+    assertEquals("cat", list.get(0).string1.toString());
+    assertEquals(-100000, list.get(1).int1);
+    assertEquals("in", list.get(1).string1.toString());
+    assertEquals(1234, list.get(2).int1);
+    assertEquals("hat", list.get(2).string1.toString());
+    map = getMap(batch, 1);
     assertEquals(2, map.size());
-    boolean[] found = new boolean[2];
-    for(Object key: map.keySet()) {
-      String str = mk.getPrimitiveJavaObject(key);
-      if (str.equals("chani")) {
-        assertEquals(false, found[0]);
-        assertEquals(5, in.get(inner.getStructFieldData(map.get(key),
-            inFields.get(0))));
-        assertEquals(str, st.getPrimitiveJavaObject(
-            inner.getStructFieldData(map.get(key), inFields.get(1))));
-        found[0] = true;
-      } else if (str.equals("mauddib")) {
-        assertEquals(false, found[1]);
-        assertEquals(1, in.get(inner.getStructFieldData(map.get(key),
-            inFields.get(0))));
-        assertEquals(str, st.getPrimitiveJavaObject(
-            inner.getStructFieldData(map.get(key), inFields.get(1))));
-        found[1] = true;
-      } else {
-        throw new IllegalArgumentException("Unknown key " + str);
-      }
-    }
-    assertEquals(true, found[0]);
-    assertEquals(true, found[1]);
+    InnerStruct value = map.get(new Text("chani"));
+    assertEquals(5, value.int1);
+    assertEquals("chani", value.string1.toString());
+    value = map.get(new Text("mauddib"));
+    assertEquals(1, value.int1);
+    assertEquals("mauddib", value.string1.toString());
 
     // handle the close up
-    assertEquals(false, rows.hasNext());
+    assertEquals(false, rows.nextBatch(batch));
     rows.close();
   }
 
@@ -1216,35 +1190,36 @@ public class TestVectorOrcFile {
     }
 
     // check out the types
-    List<OrcProto.Type> types = reader.getTypes();
-    assertEquals(3, types.size());
-    assertEquals(OrcProto.Type.Kind.STRUCT, types.get(0).getKind());
-    assertEquals(2, types.get(0).getSubtypesCount());
-    assertEquals(1, types.get(0).getSubtypes(0));
-    assertEquals(2, types.get(0).getSubtypes(1));
-    assertEquals(OrcProto.Type.Kind.INT, types.get(1).getKind());
-    assertEquals(0, types.get(1).getSubtypesCount());
-    assertEquals(OrcProto.Type.Kind.STRING, types.get(2).getKind());
-    assertEquals(0, types.get(2).getSubtypesCount());
+    TypeDescription type = reader.getSchema();
+    assertEquals(TypeDescription.Category.STRUCT, type.getCategory());
+    assertEquals(2, type.getChildren().size());
+    TypeDescription type1 = type.getChildren().get(0);
+    TypeDescription type2 = type.getChildren().get(1);
+    assertEquals(TypeDescription.Category.INT, type1.getCategory());
+    assertEquals(TypeDescription.Category.STRING, type2.getCategory());
+    assertEquals("struct<int1:int,string1:string>", type.toString());
 
     // read the contents and make sure they match
     RecordReader rows1 = reader.rows(new boolean[]{true, true, false});
     RecordReader rows2 = reader.rows(new boolean[]{true, false, true});
     r1 = new Random(1);
     r2 = new Random(2);
-    OrcStruct row1 = null;
-    OrcStruct row2 = null;
-    for(int i = 0; i < 21000; ++i) {
-      assertEquals(true, rows1.hasNext());
-      assertEquals(true, rows2.hasNext());
-      row1 = (OrcStruct) rows1.next(row1);
-      row2 = (OrcStruct) rows2.next(row2);
-      assertEquals(r1.nextInt(), ((IntWritable) row1.getFieldValue(0)).get());
-      assertEquals(Long.toHexString(r2.nextLong()),
-          row2.getFieldValue(1).toString());
-    }
-    assertEquals(false, rows1.hasNext());
-    assertEquals(false, rows2.hasNext());
+    VectorizedRowBatch batch1 = reader.getSchema().createRowBatch(1000);
+    VectorizedRowBatch batch2 = reader.getSchema().createRowBatch(1000);
+    for(int i = 0; i < 21000; i += 1000) {
+      assertEquals(true, rows1.nextBatch(batch1));
+      assertEquals(true, rows2.nextBatch(batch2));
+      assertEquals(1000, batch1.size);
+      assertEquals(1000, batch2.size);
+      for(int j=0; j < 1000; ++j) {
+        assertEquals(r1.nextInt(),
+            ((LongColumnVector) batch1.cols[0]).vector[j]);
+        assertEquals(Long.toHexString(r2.nextLong()),
+            ((BytesColumnVector) batch2.cols[1]).toString(j));
+      }
+    }
+    assertEquals(false, rows1.nextBatch(batch1));
+    assertEquals(false, rows2.nextBatch(batch2));
     rows1.close();
     rows2.close();
   }
@@ -1355,17 +1330,33 @@ public class TestVectorOrcFile {
     Reader reader = OrcFile.createReader(file,
         OrcFile.readerOptions(conf));
     RecordReader rows = reader.rows();
-    OrcStruct row = null;
+    batch = reader.getSchema().createRowBatch(1000);
+    TimestampColumnVector times = (TimestampColumnVector) batch.cols[0];
+    LongColumnVector dates = (LongColumnVector) batch.cols[1];
     for (int year = minYear; year < maxYear; ++year) {
+      rows.nextBatch(batch);
+      assertEquals(1000, batch.size);
       for(int ms = 1000; ms < 2000; ++ms) {
-        row = (OrcStruct) rows.next(row);
-        assertEquals(new TimestampWritable
-                (Timestamp.valueOf(year + "-05-05 12:34:56." + ms)),
-            row.getFieldValue(0));
-        assertEquals(new DateWritable(new Date(year - 1900, 11, 25)),
-            row.getFieldValue(1));
+        StringBuilder buffer = new StringBuilder();
+        times.stringifyValue(buffer, ms - 1000);
+        String expected = Integer.toString(year) + "-05-05 12:34:56.";
+        // suppress the final zeros on the string by dividing by the largest
+        // power of 10 that divides evenly.
+        int roundedMs = ms;
+        for(int round = 1000; round > 0; round /= 10) {
+          if (ms % round == 0) {
+            roundedMs = ms / round;
+            break;
+          }
+        }
+        expected += roundedMs;
+        assertEquals(expected, buffer.toString());
+        assertEquals(Integer.toString(year) + "-12-25",
+            new DateWritable((int) dates.vector[ms - 1000]).toString());
       }
     }
+    rows.nextBatch(batch);
+    assertEquals(0, batch.size);
   }
 
   @Test
@@ -1483,6 +1474,7 @@ public class TestVectorOrcFile {
     for(int c=0; c < batch.cols.length; ++c) {
       batch.cols[c].setRepeating(true);
     }
+    ((UnionColumnVector) batch.cols[1]).fields[0].isRepeating = true;
     setUnion(batch, 0, null, 0, 1732050807, null, null);
     for(int i=0; i < 5; ++i) {
       writer.addRowBatch(batch);
@@ -1540,83 +1532,115 @@ public class TestVectorOrcFile {
     RecordReader rows = reader.rows();
     assertEquals(0, rows.getRowNumber());
     assertEquals(0.0, rows.getProgress(), 0.000001);
-    assertEquals(true, rows.hasNext());
-    OrcStruct row = (OrcStruct) rows.next(null);
-    assertEquals(1, rows.getRowNumber());
-    ObjectInspector inspector = reader.getObjectInspector();
+
+    schema = reader.getSchema();
+    batch = schema.createRowBatch(74);
+    assertEquals(0, rows.getRowNumber());
+    rows.nextBatch(batch);
+    assertEquals(74, batch.size);
+    assertEquals(74, rows.getRowNumber());
+    TimestampColumnVector ts = (TimestampColumnVector) batch.cols[0];
+    UnionColumnVector union = (UnionColumnVector) batch.cols[1];
+    LongColumnVector longs = (LongColumnVector) union.fields[0];
+    BytesColumnVector strs = (BytesColumnVector) union.fields[1];
+    DecimalColumnVector decs = (DecimalColumnVector) batch.cols[2];
+
     assertEquals("struct<time:timestamp,union:uniontype<int,string>,decimal:decimal(38,18)>",
-        inspector.getTypeName());
-    assertEquals(new TimestampWritable(Timestamp.valueOf("2000-03-12 15:00:00")),
-        row.getFieldValue(0));
-    OrcUnion union = (OrcUnion) row.getFieldValue(1);
-    assertEquals(0, union.getTag());
-    assertEquals(new IntWritable(42), union.getObject());
-    assertEquals(new HiveDecimalWritable(HiveDecimal.create("12345678.6547456")),
-        row.getFieldValue(2));
-    row = (OrcStruct) rows.next(row);
-    assertEquals(2, rows.getRowNumber());
-    assertEquals(new TimestampWritable(Timestamp.valueOf("2000-03-20 12:00:00.123456789")),
-        row.getFieldValue(0));
-    assertEquals(1, union.getTag());
-    assertEquals(new Text("hello"), union.getObject());
-    assertEquals(new HiveDecimalWritable(HiveDecimal.create("-5643.234")),
-        row.getFieldValue(2));
-    row = (OrcStruct) rows.next(row);
-    assertEquals(null, row.getFieldValue(0));
-    assertEquals(null, row.getFieldValue(1));
-    assertEquals(null, row.getFieldValue(2));
-    row = (OrcStruct) rows.next(row);
-    assertEquals(null, row.getFieldValue(0));
-    union = (OrcUnion) row.getFieldValue(1);
-    assertEquals(0, union.getTag());
-    assertEquals(null, union.getObject());
-    assertEquals(null, row.getFieldValue(2));
-    row = (OrcStruct) rows.next(row);
-    assertEquals(null, row.getFieldValue(0));
-    assertEquals(1, union.getTag());
-    assertEquals(null, union.getObject());
-    assertEquals(null, row.getFieldValue(2));
-    row = (OrcStruct) rows.next(row);
-    assertEquals(new TimestampWritable(Timestamp.valueOf("1970-01-01 00:00:00")),
-        row.getFieldValue(0));
-    assertEquals(new IntWritable(200000), union.getObject());
-    assertEquals(new HiveDecimalWritable(HiveDecimal.create("10000000000000000000")),
-                 row.getFieldValue(2));
+        schema.toString());
+    assertEquals("2000-03-12 15:00:00.0", ts.asScratchTimestamp(0).toString());
+    assertEquals(0, union.tags[0]);
+    assertEquals(42, longs.vector[0]);
+    assertEquals("12345678.6547456", decs.vector[0].toString());
+
+    assertEquals("2000-03-20 12:00:00.123456789", ts.asScratchTimestamp(1).toString());
+    assertEquals(1, union.tags[1]);
+    assertEquals("hello", strs.toString(1));
+    assertEquals("-5643.234", decs.vector[1].toString());
+
+    assertEquals(false, ts.noNulls);
+    assertEquals(false, union.noNulls);
+    assertEquals(false, decs.noNulls);
+    assertEquals(true, ts.isNull[2]);
+    assertEquals(true, union.isNull[2]);
+    assertEquals(true, decs.isNull[2]);
+
+    assertEquals(true, ts.isNull[3]);
+    assertEquals(false, union.isNull[3]);
+    assertEquals(0, union.tags[3]);
+    assertEquals(true, longs.isNull[3]);
+    assertEquals(true, decs.isNull[3]);
+
+    assertEquals(true, ts.isNull[4]);
+    assertEquals(false, union.isNull[4]);
+    assertEquals(1, union.tags[4]);
+    assertEquals(true, strs.isNull[4]);
+    assertEquals(true, decs.isNull[4]);
+
+    assertEquals(false, ts.isNull[5]);
+    assertEquals("1970-01-01 00:00:00.0", ts.asScratchTimestamp(5).toString());
+    assertEquals(false, union.isNull[5]);
+    assertEquals(0, union.tags[5]);
+    assertEquals(false, longs.isNull[5]);
+    assertEquals(200000, longs.vector[5]);
+    assertEquals(false, decs.isNull[5]);
+    assertEquals("10000000000000000000", decs.vector[5].toString());
+
     rand = new Random(42);
     for(int i=1970; i < 2038; ++i) {
-      row = (OrcStruct) rows.next(row);
-      assertEquals(new TimestampWritable(Timestamp.valueOf(i + "-05-05 12:34:56." + i)),
-          row.getFieldValue(0));
+      int row = 6 + i - 1970;
+      assertEquals(Timestamp.valueOf(i + "-05-05 12:34:56." + i),
+          ts.asScratchTimestamp(row));
       if ((i & 1) == 0) {
-        assertEquals(0, union.getTag());
-        assertEquals(new IntWritable(i*i), union.getObject());
+        assertEquals(0, union.tags[row]);
+        assertEquals(i*i, longs.vector[row]);
       } else {
-        assertEquals(1, union.getTag());
-        assertEquals(new Text(Integer.toString(i * i)), union.getObject());
+        assertEquals(1, union.tags[row]);
+        assertEquals(Integer.toString(i * i), strs.toString(row));
       }
       assertEquals(new HiveDecimalWritable(HiveDecimal.create(new BigInteger(64, rand),
-                                   rand.nextInt(18))), row.getFieldValue(2));
-    }
-    for(int i=0; i < 5000; ++i) {
-      row = (OrcStruct) rows.next(row);
-      assertEquals(new IntWritable(1732050807), union.getObject());
-    }
-    row = (OrcStruct) rows.next(row);
-    assertEquals(new IntWritable(0), union.getObject());
-    row = (OrcStruct) rows.next(row);
-    assertEquals(new IntWritable(10), union.getObject());
-    row = (OrcStruct) rows.next(row);
-    assertEquals(new IntWritable(138), union.getObject());
-    assertEquals(false, rows.hasNext());
+                                   rand.nextInt(18))), decs.vector[row]);
+    }
+
+    // rebuild the row batch, so that we can read by 1000 rows
+    batch = schema.createRowBatch(1000);
+    ts = (TimestampColumnVector) batch.cols[0];
+    union = (UnionColumnVector) batch.cols[1];
+    longs = (LongColumnVector) union.fields[0];
+    strs = (BytesColumnVector) union.fields[1];
+    decs = (DecimalColumnVector) batch.cols[2];
+
+    for(int i=0; i < 5; ++i) {
+      rows.nextBatch(batch);
+      assertEquals("batch " + i, 1000, batch.size);
+      assertEquals("batch " + i, false, union.isRepeating);
+      assertEquals("batch " + i, true, union.noNulls);
+      for(int r=0; r < batch.size; ++r) {
+        assertEquals("bad tag at " + i + "." +r, 0, union.tags[r]);
+      }
+      assertEquals("batch " + i, true, longs.isRepeating);
+      assertEquals("batch " + i, 1732050807, longs.vector[0]);
+    }
+
+    rows.nextBatch(batch);
+    assertEquals(3, batch.size);
+    assertEquals(0, union.tags[0]);
+    assertEquals(0, longs.vector[0]);
+    assertEquals(0, union.tags[1]);
+    assertEquals(10, longs.vector[1]);
+    assertEquals(0, union.tags[2]);
+    assertEquals(138, longs.vector[2]);
+
+    rows.nextBatch(batch);
+    assertEquals(0, batch.size);
     assertEquals(1.0, rows.getProgress(), 0.00001);
     assertEquals(reader.getNumberOfRows(), rows.getRowNumber());
     rows.seekToRow(1);
-    row = (OrcStruct) rows.next(row);
-    assertEquals(new TimestampWritable(Timestamp.valueOf("2000-03-20 12:00:00.123456789")),
-        row.getFieldValue(0));
-    assertEquals(1, union.getTag());
-    assertEquals(new Text("hello"), union.getObject());
-    assertEquals(new HiveDecimalWritable(HiveDecimal.create("-5643.234")), row.getFieldValue(2));
+    rows.nextBatch(batch);
+    assertEquals(1000, batch.size);
+    assertEquals(Timestamp.valueOf("2000-03-20 12:00:00.123456789"), ts.asScratchTimestamp(0));
+    assertEquals(1, union.tags[0]);
+    assertEquals("hello", strs.toString(0));
+    assertEquals(new HiveDecimalWritable(HiveDecimal.create("-5643.234")), decs.vector[0]);
     rows.close();
   }
 
@@ -1647,17 +1671,22 @@ public class TestVectorOrcFile {
     writer.close();
     Reader reader = OrcFile.createReader(testFilePath,
         OrcFile.readerOptions(conf).filesystem(fs));
+    assertEquals(CompressionKind.SNAPPY, reader.getCompressionKind());
     RecordReader rows = reader.rows();
+    batch = reader.getSchema().createRowBatch(1000);
     rand = new Random(12);
-    OrcStruct row = null;
-    for(int i=0; i < 10000; ++i) {
-      assertEquals(true, rows.hasNext());
-      row = (OrcStruct) rows.next(row);
-      assertEquals(rand.nextInt(), ((IntWritable) row.getFieldValue(0)).get());
-      assertEquals(Integer.toHexString(rand.nextInt()),
-          row.getFieldValue(1).toString());
+    LongColumnVector longs = (LongColumnVector) batch.cols[0];
+    BytesColumnVector strs = (BytesColumnVector) batch.cols[1];
+    for(int b=0; b < 10; ++b) {
+      rows.nextBatch(batch);
+      assertEquals(1000, batch.size);
+      for(int r=0; r < batch.size; ++r) {
+        assertEquals(rand.nextInt(), longs.vector[r]);
+        assertEquals(Integer.toHexString(rand.nextInt()), strs.toString(r));
+      }
     }
-    assertEquals(false, rows.hasNext());
+    rows.nextBatch(batch);
+    assertEquals(0, batch.size);
     rows.close();
   }
 
@@ -1697,18 +1726,23 @@ public class TestVectorOrcFile {
     assertEquals(0, stripe.getIndexLength());
     RecordReader rows = reader.rows();
     rand = new Random(24);
-    OrcStruct row = null;
-    for(int i=0; i < 10000; ++i) {
-      int intVal = rand.nextInt();
-      String strVal = Integer.toBinaryString(rand.nextInt());
-      for(int j=0; j < 5; ++j) {
-        assertEquals(true, rows.hasNext());
-        row = (OrcStruct) rows.next(row);
-        assertEquals(intVal, ((IntWritable) row.getFieldValue(0)).get());
-        assertEquals(strVal, row.getFieldValue(1).toString());
+    batch = reader.getSchema().createRowBatch(1000);
+    LongColumnVector longs = (LongColumnVector) batch.cols[0];
+    BytesColumnVector strs = (BytesColumnVector) batch.cols[1];
+    for(int i=0; i < 50; ++i) {
+      rows.nextBatch(batch);
+      assertEquals("batch " + i, 1000, batch.size);
+      for(int j=0; j < 200; ++j) {
+        int intVal = rand.nextInt();
+        String strVal = Integer.toBinaryString(rand.nextInt());
+        for (int k = 0; k < 5; ++k) {
+          assertEquals(intVal, longs.vector[j * 5 + k]);
+          assertEquals(strVal, strs.toString(j * 5 + k));
+        }
       }
     }
-    assertEquals(false, rows.hasNext());
+    rows.nextBatch(batch);
+    assertEquals(0, batch.size);
     rows.close();
   }
 
@@ -1772,34 +1806,18 @@ public class TestVectorOrcFile {
       assertEquals(1000,
           colIndex.getEntry(0).getStatistics().getNumberOfValues());
     }
-    OrcStruct row = null;
-    for(int i=COUNT-1; i >= 0; --i) {
-      rows.seekToRow(i);
-      row = (OrcStruct) rows.next(row);
-      BigRow expected = createRandomRow(intValues, doubleValues,
-          stringValues, byteValues, words, i);
-      assertEquals(expected.boolean1.booleanValue(),
-          ((BooleanWritable) row.getFieldValue(0)).get());
-      assertEquals(expected.byte1.byteValue(),
-          ((ByteWritable) row.getFieldValue(1)).get());
-      assertEquals(expected.short1.shortValue(),
-          ((ShortWritable) row.getFieldValue(2)).get());
-      assertEquals(expected.int1.intValue(),
-          ((IntWritable) row.getFieldValue(3)).get());
-      assertEquals(expected.long1.longValue(),
-          ((LongWritable) row.getFieldValue(4)).get());
-      assertEquals(expected.float1,
-          ((FloatWritable) row.getFieldValue(5)).get(), 0.0001);
-      assertEquals(expected.double1,
-          ((DoubleWritable) row.getFieldValue(6)).get(), 0.0001);
-      assertEquals(expected.bytes1, row.getFieldValue(7));
-      assertEquals(expected.string1, row.getFieldValue(8));
-      List<InnerStruct> expectedList = expected.middle.list;
-      List<OrcStruct> actualList =
-          (List<OrcStruct>) ((OrcStruct) row.getFieldValue(9)).getFieldValue(0);
-      compareList(expectedList, actualList, "middle list " + i);
-      compareList(expected.list, (List<OrcStruct>) row.getFieldValue(10),
-          "list " + i);
+    batch = reader.getSchema().createRowBatch();
+    int nextRowInBatch = -1;
+    for(int i=COUNT-1; i >= 0; --i, --nextRowInBatch) {
+      // if we have consumed the previous batch read a new one
+      if (nextRowInBatch < 0) {
+        long base = Math.max(i - 1023, 0);
+        rows.seekToRow(base);
+        assertEquals("row " + i, true, rows.nextBatch(batch));
+        nextRowInBatch = batch.size - 1;
+      }
+      checkRandomRow(batch, intValues, doubleValues,
+          stringValues, byteValues, words, i, nextRowInBatch);
     }
     rows.close();
     Iterator<StripeInformation> stripeIterator =
@@ -1825,41 +1843,20 @@ public class TestVectorOrcFile {
         .range(offsetOfStripe2, offsetOfStripe4 - offsetOfStripe2)
         .include(columns));
     rows.seekToRow(lastRowOfStripe2);
-    for(int i = 0; i < 2; ++i) {
-      row = (OrcStruct) rows.next(row);
-      BigRow expected = createRandomRow(intValues, doubleValues,
-                                        stringValues, byteValues, words,
-                                        (int) (lastRowOfStripe2 + i));
-
-      assertEquals(expected.long1.longValue(),
-          ((LongWritable) row.getFieldValue(4)).get());
-      assertEquals(expected.string1, row.getFieldValue(8));
-    }
+    // we only want two rows
+    batch = reader.getSchema().createRowBatch(2);
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1, batch.size);
+    assertEquals(intValues[(int) lastRowOfStripe2], getLong(batch, 0));
+    assertEquals(stringValues[(int) lastRowOfStripe2],
+        getText(batch, 0).toString());
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(intValues[(int) lastRowOfStripe2 + 1], getLong(batch, 0));
+    assertEquals(stringValues[(int) lastRowOfStripe2 + 1],
+        getText(batch, 0).toString());
     rows.close();
   }
 
-  private void compareInner(InnerStruct expect,
-                            OrcStruct actual,
-                            String context) throws Exception {
-    if (expect == null || actual == null) {
-      assertEquals(context, null, expect);
-      assertEquals(context, null, actual);
-    } else {
-      assertEquals(context, expect.int1,
-          ((IntWritable) actual.getFieldValue(0)).get());
-      assertEquals(context, expect.string1, actual.getFieldValue(1));
-    }
-  }
-
-  private void compareList(List<InnerStruct> expect,
-                           List<OrcStruct> actual,
-                           String context) throws Exception {
-    assertEquals(context, expect.size(), actual.size());
-    for(int j=0; j < expect.size(); ++j) {
-      compareInner(expect.get(j), actual.get(j), context + " at " + j);
-    }
-  }
-
   private void appendRandomRow(VectorizedRowBatch batch,
                                long[] intValues, double[] doubleValues,
                                String[] stringValues,
@@ -1874,17 +1871,18 @@ public class TestVectorOrcFile {
         new MiddleStruct(inner, inner2), list(), map(inner, inner2));
   }
 
-  private BigRow createRandomRow(long[] intValues, double[] doubleValues,
-                                 String[] stringValues,
-                                 BytesWritable[] byteValues,
-                                 String[] words, int i) {
+  private void checkRandomRow(VectorizedRowBatch batch,
+                              long[] intValues, double[] doubleValues,
+                              String[] stringValues,
+                              BytesWritable[] byteValues,
+                              String[] words, int i, int rowInBatch) {
     InnerStruct inner = new InnerStruct((int) intValues[i], stringValues[i]);
     InnerStruct inner2 = new InnerStruct((int) (intValues[i] >> 32),
         words[i % words.length] + "-x");
-    return new BigRow((intValues[i] & 1) == 0, (byte) intValues[i],
+    checkBigRow(batch, rowInBatch, i, (intValues[i] & 1) == 0, (byte) intValues[i],
         (short) intValues[i], (int) intValues[i], intValues[i],
-        (float) doubleValues[i], doubleValues[i], byteValues[i],stringValues[i],
-        new MiddleStruct(inner, inner2), list(), map(inner,inner2));
+        (float) doubleValues[i], doubleValues[i], byteValues[i], stringValues[i],
+        new MiddleStruct(inner, inner2), list(), map(inner, inner2));
   }
 
   private static class MyMemoryManager extends MemoryManager {
@@ -2045,15 +2043,19 @@ public class TestVectorOrcFile {
         .range(0L, Long.MAX_VALUE)
         .include(new boolean[]{true, true, true})
         .searchArgument(sarg, new String[]{null, "int1", "string1"}));
+    batch = reader.getSchema().createRowBatch(2000);
+    LongColumnVector ints = (LongColumnVector) batch.cols[0];
+    BytesColumnVector strs = (BytesColumnVector) batch.cols[1];
+
     assertEquals(1000L, rows.getRowNumber());
-    OrcStruct row = null;
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1000, batch.size);
+
     for(int i=1000; i < 2000; ++i) {
-      assertTrue(rows.hasNext());
-      row = (OrcStruct) rows.next(row);
-      assertEquals(300 * i, ((IntWritable) row.getFieldValue(0)).get());
-      assertEquals(Integer.toHexString(10*i), row.getFieldValue(1).toString());
+      assertEquals(300 * i, ints.vector[i - 1000]);
+      assertEquals(Integer.toHexString(10*i), strs.toString(i - 1000));
     }
-    assertTrue(!rows.hasNext());
+    assertEquals(false, rows.nextBatch(batch));
     assertEquals(3500, rows.getRowNumber());
 
     // look through the file with no rows selected
@@ -2082,40 +2084,26 @@ public class TestVectorOrcFile {
         .range(0L, Long.MAX_VALUE)
         .include(new boolean[]{true, true, true})
         .searchArgument(sarg, new String[]{null, "int1", "string1"}));
-    row = null;
+    assertEquals(0, rows.getRowNumber());
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1000, batch.size);
+    assertEquals(3000, rows.getRowNumber());
     for(int i=0; i < 1000; ++i) {
-      assertTrue(rows.hasNext());
-      assertEquals(i, rows.getRowNumber());
-      row = (OrcStruct) rows.next(row);
-      assertEquals(300 * i, ((IntWritable) row.getFieldValue(0)).get());
-      assertEquals(Integer.toHexString(10*i), row.getFieldValue(1).toString());
+      assertEquals(300 * i, ints.vector[i]);
+      assertEquals(Integer.toHexString(10*i), strs.toString(i));
     }
+
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(500, batch.size);
+    assertEquals(3500, rows.getRowNumber());
     for(int i=3000; i < 3500; ++i) {
-      assertTrue(rows.hasNext());
-      assertEquals(i, rows.getRowNumber());
-      row = (OrcStruct) rows.next(row);
-      assertEquals(300 * i, ((IntWritable) row.getFieldValue(0)).get());
-      assertEquals(Integer.toHexString(10*i), row.getFieldValue(1).toString());
+      assertEquals(300 * i, ints.vector[i - 3000]);
+      assertEquals(Integer.toHexString(10*i), strs.toString(i - 3000));
     }
-    assertTrue(!rows.hasNext());
+    assertEquals(false, rows.nextBatch(batch));
     assertEquals(3500, rows.getRowNumber());
   }
 
-  private static String pad(String value, int length) {
-    if (value.length() == length) {
-      return value;
-    } else if (value.length() > length) {
-      return value.substring(0, length);
-    } else {
-      StringBuilder buf = new StringBuilder();
-      buf.append(value);
-      for(int i=0; i < length - value.length(); ++i) {
-        buf.append(' ');
-      }
-      return buf.toString();
-    }
-  }
-
   /**
    * Test all of the types that have distinct ORC writers using the vectorized
    * writer with different combinations of repeating and null values.
@@ -2232,8 +2220,7 @@ public class TestVectorOrcFile {
       ((LongColumnVector) batch.cols[6]).vector[r] =
           new DateWritable(new Date(111, 6, 1)).getDays() + r;
 
-      Timestamp ts = new Timestamp(115, 9, 23, 10, 11, 59, 999999999);
-      ts.setTime(ts.getTime() + r * 1000);
+      Timestamp ts = new Timestamp(115, 9, 25, 10, 11, 59 + r, 999999999);
       ((TimestampColumnVector) batch.cols[7]).set(r, ts);
       ((DecimalColumnVector) batch.cols[8]).vector[r] =
           new HiveDecimalWritable("1.234567");
@@ -2302,118 +2289,125 @@ public class TestVectorOrcFile {
     assertEquals(14813, ((StringColumnStatistics) stats[12]).getSum());
 
     RecordReader rows = reader.rows();
-    OrcStruct row = null;
+    batch = reader.getSchema().createRowBatch(1024);
+    BytesColumnVector bins = (BytesColumnVector) batch.cols[0];
+    LongColumnVector bools = (LongColumnVector) batch.cols[1];
+    LongColumnVector bytes = (LongColumnVector) batch.cols[2];
+    LongColumnVector longs = (LongColumnVector) batch.cols[3];
+    DoubleColumnVector floats = (DoubleColumnVector) batch.cols[4];
+    DoubleColumnVector doubles = (DoubleColumnVector) batch.cols[5];
+    LongColumnVector dates = (LongColumnVector) batch.cols[6];
+    TimestampColumnVector times = (TimestampColumnVector) batch.cols[7];
+    DecimalColumnVector decs = (DecimalColumnVector) batch.cols[8];
+    BytesColumnVector strs = (BytesColumnVector) batch.cols[9];
+    BytesColumnVector chars = (BytesColumnVector) batch.cols[10];
+    BytesColumnVector vcs = (BytesColumnVector) batch.cols[11];
+    StructColumnVector structs = (StructColumnVector) batch.cols[12];
+    UnionColumnVector unions = (UnionColumnVector) batch.cols[13];
+    ListColumnVector lists = (ListColumnVector) batch.cols[14];
+    MapColumnVector maps = (MapColumnVector) batch.cols[15];
+    LongColumnVector structInts = (LongColumnVector) structs.fields[0];
+    LongColumnVector unionInts = (LongColumnVector) unions.fields[1];
+    LongColumnVector listInts = (LongColumnVector) lists.child;
+    BytesColumnVector mapKeys = (BytesColumnVector) maps.keys;
+    BytesColumnVector mapValues = (BytesColumnVector) maps.values;
+
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1024, batch.size);
 
     // read the 1024 nulls
-    for(int r=0; r < 1024; ++r) {
-      assertEquals(true, rows.hasNext());
-      row = (OrcStruct) rows.next(row);
-      for(int f=0; f < row.getNumFields(); ++f) {
-        assertEquals("non-null on row " + r + " field " + f,
-            null, row.getFieldValue(f));
-      }
+    for(int f=0; f < batch.cols.length; ++f) {
+      assertEquals("field " + f,
+          true, batch.cols[f].isRepeating);
+      assertEquals("field " + f,
+          false, batch.cols[f].noNulls);
+      assertEquals("field " + f,
+          true, batch.cols[f].isNull[0]);
     }
 
     // read the 1024 repeat values
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1024, batch.size);
     for(int r=0; r < 1024; ++r) {
-      assertEquals(true, rows.hasNext());
-      row = (OrcStruct) rows.next(row);
-      assertEquals("row " + r, "48 6f 72 74 6f 6e",
-          row.getFieldValue(0).toString());
-      assertEquals("row " + r, "true", row.getFieldValue(1).toString());
-      assertEquals("row " + r, "-126", row.getFieldValue(2).toString());
-      assertEquals("row " + r, "1311768467463790320",
-          row.getFieldValue(3).toString());
-      assertEquals("row " + r, "1.125", row.getFieldValue(4).toString());
-      assertEquals("row " + r, "9.765625E-4", row.getFieldValue(5).toString());
-      assertEquals("row " + r, "2011-07-01", row.getFieldValue(6).toString());
+      assertEquals("row " + r, "Horton", bins.toString(r));
+      assertEquals("row " + r, 1, bools.vector[r]);
+      assertEquals("row " + r, -126, bytes.vector[r]);
+      assertEquals("row " + r, 1311768467463790320L, longs.vector[r]);
+      assertEquals("row " + r, 1.125, floats.vector[r], 0.00001);
+      assertEquals("row " + r, 9.765625E-4, doubles.vector[r], 0.000001);
+      assertEquals("row " + r, "2011-07-01",
+          new DateWritable((int) dates.vector[r]).toString());
       assertEquals("row " + r, "2015-10-23 10:11:59.999999999",
-          row.getFieldValue(7).toString());
-      assertEquals("row " + r, "1.234567", row.getFieldValue(8).toString());
-      assertEquals("row " + r, "Echelon", row.getFieldValue(9).toString());
-      assertEquals("row " + r, "Juggernaut", row.getFieldValue(10).toString());
-      assertEquals("row " + r, "Dreadnaugh", row.getFieldValue(11).toString());
-      assertEquals("row " + r, "{123}", row.getFieldValue(12).toString());
-      assertEquals("row " + r, "union(1, 1234)",
-          row.getFieldValue(13).toString());
-      assertEquals("row " + r, "[31415, 31415, 31415]",
-          row.getFieldValue(14).toString());
-      assertEquals("row " + r, "{ORC=fast, Hive=fast, LLAP=fast}",
-          row.getFieldValue(15).toString());
+          times.asScratchTimestamp(r).toString());
+      assertEquals("row " + r, "1.234567", decs.vector[r].toString());
+      assertEquals("row " + r, "Echelon", strs.toString(r));
+      assertEquals("row " + r, "Juggernaut", chars.toString(r));
+      assertEquals("row " + r, "Dreadnaugh", vcs.toString(r));
+      assertEquals("row " + r, 123, structInts.vector[r]);
+      assertEquals("row " + r, 1, unions.tags[r]);
+      assertEquals("row " + r, 1234, unionInts.vector[r]);
+      assertEquals("row " + r, 3, lists.lengths[r]);
+      assertEquals("row " + r, true, listInts.isRepeating);
+      assertEquals("row " + r, 31415, listInts.vector[0]);
+      assertEquals("row " + r, 3, maps.lengths[r]);
+      assertEquals("row " + r, "ORC", mapKeys.toString((int) maps.offsets[r]));
+      assertEquals("row " + r, "Hive", mapKeys.toString((int) maps.offsets[r] + 1));
+      assertEquals("row " + r, "LLAP", mapKeys.toString((int) maps.offsets[r] + 2));
+      assertEquals("row " + r, "fast", mapValues.toString((int) maps.offsets[r]));
+      assertEquals("row " + r, "fast", mapValues.toString((int) maps.offsets[r] + 1));
+      assertEquals("row " + r, "fast", mapValues.toString((int) maps.offsets[r] + 2));
     }
 
     // read the second set of 1024 nulls
-    for(int r=0; r < 1024; ++r) {
-      assertEquals(true, rows.hasNext());
-      row = (OrcStruct) rows.next(row);
-      for(int f=0; f < row.getNumFields(); ++f) {
-        assertEquals("non-null on row " + r + " field " + f,
-            null, row.getFieldValue(f));
-      }
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1024, batch.size);
+    for(int f=0; f < batch.cols.length; ++f) {
+      assertEquals("field " + f,
+          true, batch.cols[f].isRepeating);
+      assertEquals("field " + f,
+          false, batch.cols[f].noNulls);
+      assertEquals("field " + f,
+          true, batch.cols[f].isNull[0]);
     }
+
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1024, batch.size);
     for(int r=0; r < 1024; ++r) {
-      assertEquals(true, rows.hasNext());
-      row = (OrcStruct) rows.next(row);
-      byte[] hex = Integer.toHexString(r).getBytes();
-      StringBuilder expected = new StringBuilder();
-      for(int i=0; i < hex.length; ++i) {
-        if (i != 0) {
-          expected.append(' ');
-        }
-        expected.append(Integer.toHexString(hex[i]));
-      }
-      assertEquals("row " + r, expected.toString(),
-          row.getFieldValue(0).toString());
-      assertEquals("row " + r, r % 2 == 1 ? "true" : "false",
-          row.getFieldValue(1).toString());
-      assertEquals("row " + r, Integer.toString((byte) (r % 255)),
-          row.getFieldValue(2).toString());
-      assertEquals("row " + r, Long.toString(31415L * r),
-          row.getFieldValue(3).toString());
-      assertEquals("row " + r, Float.toString(1.125F * r),
-          row.getFieldValue(4).toString());
-      assertEquals("row " + r, Double.toString(0.0009765625 * r),
-          row.getFieldValue(5).toString());
-      assertEquals("row " + r, new Date(111, 6, 1 + r).toString(),
-          row.getFieldValue(6).toString());
-      Timestamp ts = new Timestamp(115, 9, 23, 10, 11, 59, 999999999);
-      ts.setTime(ts.getTime() + r * 1000);
+      String hex = Integer.toHexString(r);
+
+      assertEquals("row " + r, hex, bins.toString(r));
+      assertEquals("row " + r, r % 2 == 1 ? 1 : 0, bools.vector[r]);
+      assertEquals("row " + r, (byte) (r % 255), bytes.vector[r]);
+      assertEquals("row " + r, 31415L * r, longs.vector[r]);
+      assertEquals("row " + r, 1.125F * r, floats.vector[r], 0.0001);
+      assertEquals("row " + r, 0.0009765625 * r, doubles.vector[r], 0.000001);
+      assertEquals("row " + r, new DateWritable(new Date(111, 6, 1 + r)),
+          new DateWritable((int) dates.vector[r]));
       assertEquals("row " + r,
-          ts.toString(),
-          row.getFieldValue(7).toString());
-      assertEquals("row " + r, "1.234567", row.getFieldValue(8).toString());
-      assertEquals("row " + r, Integer.toString(r),
-          row.getFieldValue(9).toString());
-      assertEquals("row " + r, pad(Integer.toHexString(r), 10),
-          row.getFieldValue(10).toString());
-      assertEquals("row " + r, Integer.toHexString(r * 128),
-          row.getFieldValue(11).toString());
-      assertEquals("row " + r, "{" + Integer.toString(r + 13) + "}",
-          row.getFieldValue(12).toString());
-      assertEquals("row " + r, "union(1, " + Integer.toString(r + 42) + ")",
-          row.getFieldValue(13).toString());
-      assertEquals("row " + r, "[31415, 31416, 31417]",
-          row.getFieldValue(14).toString());
-      expected = new StringBuilder();
-      expected.append('{');
-      expected.append(Integer.toHexString(3 * r));
-      expected.append('=');
-      expected.append(3 * r);
-      expected.append(", ");
-      expected.append(Integer.toHexString(3 * r + 1));
-      expected.append('=');
-      expected.append(3 * r + 1);
-      expected.append(", ");
-      expected.append(Integer.toHexString(3 * r + 2));
-      expected.append('=');
-      expected.append(3 * r + 2);
-      expected.append('}');
-      assertEquals("row " + r, expected.toString(),
-          row.getFieldValue(15).toString());
+          new Timestamp(115, 9, 25, 10, 11, 59 + r, 999999999),
+          times.asScratchTimestamp(r));
+      assertEquals("row " + r, "1.234567", decs.vector[r].toString());
+      assertEquals("row " + r, Integer.toString(r), strs.toString(r));
+      assertEquals("row " + r, Integer.toHexString(r), chars.toString(r));
+      assertEquals("row " + r, Integer.toHexString(r * 128), vcs.toString(r));
+      assertEquals("row " + r, r + 13, structInts.vector[r]);
+      assertEquals("row " + r, 1, unions.tags[r]);
+      assertEquals("row " + r, r + 42, unionInts.vector[r]);
+      assertEquals("row " + r, 3, lists.lengths[r]);
+      assertEquals("row " + r, 31415, listInts.vector[(int) lists.offsets[r]]);
+      assertEquals("row " + r, 31416, listInts.vector[(int) lists.offsets[r] + 1]);
+      assertEquals("row " + r, 31417, listInts.vector[(int) lists.offsets[r] + 2]);
+      assertEquals("row " + r, 3, maps.lengths[3]);
+      assertEquals("row " + r, Integer.toHexString(3 * r), mapKeys.toString((int) maps.offsets[r]));
+      assertEquals("row " + r, Integer.toString(3 * r), mapValues.toString((int) maps.offsets[r]));
+      assertEquals("row " + r, Integer.toHexString(3 * r + 1), mapKeys.toString((int) maps.offsets[r] + 1));
+      assertEquals("row " + r, Integer.toString(3 * r + 1), mapValues.toString((int) maps.offsets[r] + 1));
+      assertEquals("row " + r, Integer.toHexString(3 * r + 2), mapKeys.toString((int) maps.offsets[r] + 2));
+      assertEquals("row " + r, Integer.toString(3 * r + 2), mapValues.toString((int) maps.offsets[r] + 2));
     }
 
     // should have no more rows
-    assertEquals(false, rows.hasNext());
+    assertEquals(false, rows.nextBatch(batch));
   }
 
   private static String makeString(BytesColumnVector vector, int row) {
@@ -2455,7 +2449,8 @@ public class TestVectorOrcFile {
     Reader reader = OrcFile.createReader(testFilePath,
         OrcFile.readerOptions(conf));
     RecordReader rows = reader.rows();
-    batch = rows.nextBatch(null);
+    batch = reader.getSchema().createRowBatch();
+    assertEquals(true, rows.nextBatch(batch));
     assertEquals(4, batch.size);
     // ORC currently trims the output strings. See HIVE-12286
     assertEquals("",
@@ -2504,19 +2499,20 @@ public class TestVectorOrcFile {
     Reader reader = OrcFile.createReader(testFilePath,
         OrcFile.readerOptions(conf));
     RecordReader rows = reader.rows();
-    batch = rows.nextBatch(null);
+    batch = reader.getSchema().createRowBatch();
+    assertEquals(true, rows.nextBatch(batch));
     assertEquals(1024, batch.size);
     for(int r=0; r < 1024; ++r) {
       assertEquals(Integer.toString(r * 10001),
           makeString((BytesColumnVector) batch.cols[0], r));
     }
-    batch = rows.nextBatch(batch);
+    assertEquals(true, rows.nextBatch(batch));
     assertEquals(1024, batch.size);
     for(int r=0; r < 1024; ++r) {
       assertEquals("Halloween",
           makeString((BytesColumnVector) batch.cols[0], r));
     }
-    assertEquals(false, rows.hasNext());
+    assertEquals(false, rows.nextBatch(batch));
   }
 
   @Test
@@ -2541,18 +2537,21 @@ public class TestVectorOrcFile {
     Reader reader = OrcFile.createReader(testFilePath,
         OrcFile.readerOptions(conf));
     RecordReader rows = reader.rows();
-    OrcStruct row = null;
+    batch = reader.getSchema().createRowBatch();
+    rows.nextBatch(batch);
+    assertEquals(1024, batch.size);
+    StructColumnVector inner = (StructColumnVector) batch.cols[0];
+    LongColumnVector vec = (LongColumnVector) inner.fields[0];
     for(int r=0; r < 1024; ++r) {
-      assertEquals(true, rows.hasNext());
-      row = (OrcStruct) rows.next(row);
-      OrcStruct inner = (OrcStruct) row.getFieldValue(0);
       if (r < 200 || (r >= 400 && r < 600) || r >= 800) {
-        assertEquals("row " + r, null, inner);
+        assertEquals("row " + r, true, inner.isNull[r]);
       } else {
-        assertEquals("row " + r, "{" + r + "}", inner.toString());
+        assertEquals("row " + r, false, inner.isNull[r]);
+        assertEquals("row " + r, r, vec.vector[r]);
       }
     }
-    assertEquals(false, rows.hasNext());
+    rows.nextBatch(batch);
+    assertEquals(0, batch.size);
   }
 
   /**
@@ -2595,28 +2594,38 @@ public class TestVectorOrcFile {
     Reader reader = OrcFile.createReader(testFilePath,
         OrcFile.readerOptions(conf));
     RecordReader rows = reader.rows();
-    OrcStruct row = null;
+    batch = reader.getSchema().createRowBatch(1024);
+    UnionColumnVector union = (UnionColumnVector) batch.cols[0];
+    LongColumnVector ints = (LongColumnVector) union.fields[0];
+    LongColumnVector longs = (LongColumnVector) union.fields[1];
+    assertEquals(true, rows.nextBatch(batch));
+    assertEquals(1024, batch.size);
     for(int r=0; r < 1024; ++r) {
-      assertEquals(true, rows.hasNext());
-      row = (OrcStruct) rows.next(row);
-      OrcUnion inner = (OrcUnion) row.getFieldValue(0);
       if (r < 200) {
-        assertEquals("row " + r, null, inner);
+        assertEquals("row " + r, true, union.isNull[r]);
       } else if (r < 300) {
-        assertEquals("row " + r, "union(0, " + r +")", inner.toString());
+        assertEquals("row " + r, false, union.isNull[r]);
+        assertEquals("row " + r, 0, union.tags[r]);
+        assertEquals("row " + r, r, ints.vector[r]);
       } else if (r < 400) {
-        assertEquals("row " + r, "union(1, " + -r +")", inner.toString());
+        assertEquals("row " + r, false, union.isNull[r]);
+        assertEquals("row " + r, 1, union.tags[r]);
+        assertEquals("row " + r, -r, longs.vector[r]);
       } else if (r < 600) {
-        assertEquals("row " + r, null, inner);
+        assertEquals("row " + r, true, union.isNull[r]);
       } else if (r < 800) {
-        assertEquals("row " + r, "union(1, " + -r +")", inner.toString());
+        assertEquals("row " + r, false, union.isNull[r]);
+        assertEquals("row " + r, 1, union.tags[r]);
+        assertEquals("row " + r, -r, longs.vector[r]);
       } else if (r < 1000) {
-        assertEquals("row " + r, null, inner);
+        assertEquals("row " + r, true, union.isNull[r]);
       } else {
-        assertEquals("row " + r, "union(1, " + -r +")", inner.toString());
+        assertEquals("row " + r, false, union.isNull[r]);
+        assertEquals("row " + r, 1, union.tags[r]);
+        assertEquals("row " + r, -r, longs.vector[r]);
       }
     }
-    assertEquals(false, rows.hasNext());
+    assertEquals(false, rows.nextBatch(batch));
   }
 
   /**
@@ -2663,31 +2672,33 @@ public class TestVectorOrcFile {
     Reader r

<TRUNCATED>
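
The hunks above replace TestVectorOrcFile's OrcStruct row-at-a-time assertions (rows.hasNext() / rows.next(row)) with VectorizedRowBatch reads. A minimal sketch of that batch-read loop follows; only the Reader/RecordReader/createRowBatch/nextBatch calls come from the patch, while the import packages, file path, and single-long-column schema are illustrative assumptions:

  // Sketch only: package names and the long-column layout are assumptions;
  // the reader calls mirror the ones asserted in the hunks above.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
  import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
  import org.apache.hadoop.hive.ql.io.orc.OrcFile;
  import org.apache.hadoop.hive.ql.io.orc.Reader;
  import org.apache.hadoop.hive.ql.io.orc.RecordReader;

  public class OrcBatchReadSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Reader reader = OrcFile.createReader(new Path(args[0]),
          OrcFile.readerOptions(conf));
      RecordReader rows = reader.rows();
      // The batch comes from the reader's schema; 1024 matches the tests above.
      VectorizedRowBatch batch = reader.getSchema().createRowBatch(1024);
      long sum = 0;
      // nextBatch() returns false once the file is exhausted (batch.size == 0).
      while (rows.nextBatch(batch)) {
        LongColumnVector col0 = (LongColumnVector) batch.cols[0];
        for (int r = 0; r < batch.size; ++r) {
          int idx = col0.isRepeating ? 0 : r;  // repeating vectors keep one value in slot 0
          if (col0.noNulls || !col0.isNull[idx]) {
            sum += col0.vector[idx];
          }
        }
      }
      rows.close();
      System.out.println(reader.getNumberOfRows() + " rows, sum=" + sum);
    }
  }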

[29/58] [abbrv] hive git commit: HIVE-13380 : Decimal should have lower precedence than double in type hierarchy (Ashutosh Chauhan via Jason Dere)

Posted by jd...@apache.org.
HIVE-13380 : Decimal should have lower precedence than double in type hierarchy (Ashutosh Chauhan via Jason Dere)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b507520e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b507520e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b507520e

Branch: refs/heads/llap
Commit: b507520e17811c8e059aa7d30490d29b984e2e96
Parents: e16bcca
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Tue Mar 29 18:14:43 2016 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Tue Apr 12 18:04:43 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/udf/UDFSign.java  | 15 +++++++++++
 .../hive/ql/exec/TestFunctionRegistry.java      | 26 ++++++++++----------
 .../ql/udf/generic/TestGenericUDFOPMinus.java   |  4 +--
 .../udf/generic/TestGenericUDFOPMultiply.java   |  4 +--
 .../ql/udf/generic/TestGenericUDFOPPlus.java    |  4 +--
 .../clientpositive/alter_partition_change_col.q |  1 +
 .../clientpositive/alter_table_cascade.q        |  1 +
 .../results/clientpositive/perf/query32.q.out   |  2 +-
 .../results/clientpositive/perf/query65.q.out   |  2 +-
 .../results/clientpositive/perf/query75.q.out   |  2 +-
 .../results/clientpositive/perf/query89.q.out   |  8 +++---
 .../tez/vector_decimal_expressions.q.out        |  2 +-
 .../results/clientpositive/udf_greatest.q.out   |  4 +--
 .../test/results/clientpositive/udf_least.q.out |  4 +--
 .../vector_decimal_expressions.q.out            |  2 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     | 11 +++------
 16 files changed, 53 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
index 022b130..67d62d9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSign.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.FuncSignLongToDoubl
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
 
 @Description(name = "sign",
              value = "_FUNC_(x) - returns the sign of x )",
@@ -57,6 +58,20 @@ public class UDFSign extends UDF {
     return result;
   }
 
+  public DoubleWritable evaluate(LongWritable a) {
+    if (a == null) {
+      return null;
+    }
+    if (a.get() == 0) {
+      result.set(0);
+    } else if (a.get() > 0) {
+      result.set(1);
+    } else {
+      result.set(-1);
+    }
+    return result;
+  }
+
   /**
    * Get the sign of the decimal input
    *
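
The new overload above handles bigint inputs directly. A quick check of its behaviour, calling only the evaluate(LongWritable) method added in this hunk; the wrapping class and main method are illustrative:

  import org.apache.hadoop.hive.ql.udf.UDFSign;
  import org.apache.hadoop.io.LongWritable;

  public class UDFSignLongCheck {
    public static void main(String[] args) {
      UDFSign sign = new UDFSign();
      // evaluate(LongWritable) returns a DoubleWritable, like the other overloads
      System.out.println(sign.evaluate(new LongWritable(-42L)));  // -1.0
      System.out.println(sign.evaluate(new LongWritable(0L)));    // 0.0
      System.out.println(sign.evaluate(new LongWritable(7L)));    // 1.0
      System.out.println(sign.evaluate((LongWritable) null));     // null
    }
  }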

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
index 8488c21..59ecd1e 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFunctionRegistry.java
@@ -23,12 +23,10 @@ import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
 
-import junit.framework.Assert;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde2.io.DateWritable;
 import org.apache.hadoop.hive.serde2.io.DoubleWritable;
@@ -42,6 +40,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
+import org.junit.Assert;
 
 public class TestFunctionRegistry extends TestCase {
 
@@ -85,9 +84,10 @@ public class TestFunctionRegistry extends TestCase {
 
   public void testImplicitConversion() {
     implicit(TypeInfoFactory.intTypeInfo, TypeInfoFactory.decimalTypeInfo, true);
-    implicit(TypeInfoFactory.floatTypeInfo, TypeInfoFactory.decimalTypeInfo, true);
-    implicit(TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.decimalTypeInfo, true);
-    implicit(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.decimalTypeInfo, true);
+    implicit(TypeInfoFactory.longTypeInfo, TypeInfoFactory.decimalTypeInfo, true);
+    implicit(TypeInfoFactory.floatTypeInfo, TypeInfoFactory.decimalTypeInfo, false);
+    implicit(TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.decimalTypeInfo, false);
+    implicit(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.decimalTypeInfo, false);
     implicit(TypeInfoFactory.dateTypeInfo, TypeInfoFactory.decimalTypeInfo, false);
     implicit(TypeInfoFactory.timestampTypeInfo, TypeInfoFactory.decimalTypeInfo, false);
     implicit(varchar10, TypeInfoFactory.stringTypeInfo, true);
@@ -185,16 +185,16 @@ public class TestFunctionRegistry extends TestCase {
   public void testGetMethodInternal() {
 
     verify(TestUDF.class, "same", TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo,
-           DoubleWritable.class, DoubleWritable.class, false);
+           HiveDecimalWritable.class, HiveDecimalWritable.class, false);
 
     verify(TestUDF.class, "same", TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.doubleTypeInfo,
            DoubleWritable.class, DoubleWritable.class, false);
 
     verify(TestUDF.class, "same", TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.decimalTypeInfo,
-           HiveDecimalWritable.class, HiveDecimalWritable.class, false);
+           DoubleWritable.class, DoubleWritable.class, false);
 
     verify(TestUDF.class, "same", TypeInfoFactory.decimalTypeInfo, TypeInfoFactory.doubleTypeInfo,
-           HiveDecimalWritable.class, HiveDecimalWritable.class, false);
+           DoubleWritable.class, DoubleWritable.class, false);
 
     verify(TestUDF.class, "same", TypeInfoFactory.decimalTypeInfo, TypeInfoFactory.decimalTypeInfo,
            HiveDecimalWritable.class, HiveDecimalWritable.class, false);
@@ -226,7 +226,7 @@ public class TestFunctionRegistry extends TestCase {
     common(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.decimalTypeInfo,
            TypeInfoFactory.stringTypeInfo);
     common(TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.decimalTypeInfo,
-           TypeInfoFactory.decimalTypeInfo);
+           TypeInfoFactory.doubleTypeInfo);
     common(TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.stringTypeInfo,
            TypeInfoFactory.stringTypeInfo);
 
@@ -246,9 +246,9 @@ public class TestFunctionRegistry extends TestCase {
     comparison(TypeInfoFactory.intTypeInfo, TypeInfoFactory.decimalTypeInfo,
                TypeInfoFactory.decimalTypeInfo);
     comparison(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.decimalTypeInfo,
-               TypeInfoFactory.decimalTypeInfo);
+               TypeInfoFactory.doubleTypeInfo);
     comparison(TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.decimalTypeInfo,
-               TypeInfoFactory.decimalTypeInfo);
+               TypeInfoFactory.doubleTypeInfo);
     comparison(TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.stringTypeInfo,
                TypeInfoFactory.doubleTypeInfo);
 
@@ -330,9 +330,9 @@ public class TestFunctionRegistry extends TestCase {
     unionAll(TypeInfoFactory.intTypeInfo, TypeInfoFactory.decimalTypeInfo,
         TypeInfoFactory.decimalTypeInfo);
     unionAll(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.decimalTypeInfo,
-        TypeInfoFactory.decimalTypeInfo);
+        TypeInfoFactory.stringTypeInfo);
     unionAll(TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.decimalTypeInfo,
-        TypeInfoFactory.decimalTypeInfo);
+        TypeInfoFactory.doubleTypeInfo);
     unionAll(TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.stringTypeInfo,
         TypeInfoFactory.stringTypeInfo);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java
index 771a6c7..b060877 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMinus.java
@@ -224,7 +224,7 @@ public class TestGenericUDFOPMinus extends AbstractTestGenericUDFOPNumeric {
 
     verifyReturnType(new GenericUDFOPMinus(), "float", "float", "float");
     verifyReturnType(new GenericUDFOPMinus(), "float", "double", "double");
-    verifyReturnType(new GenericUDFOPMinus(), "float", "decimal(10,2)", "double");
+    verifyReturnType(new GenericUDFOPMinus(), "float", "decimal(10,2)", "float");
 
     verifyReturnType(new GenericUDFOPMinus(), "double", "double", "double");
     verifyReturnType(new GenericUDFOPMinus(), "double", "decimal(10,2)", "double");
@@ -246,7 +246,7 @@ public class TestGenericUDFOPMinus extends AbstractTestGenericUDFOPNumeric {
 
     verifyReturnType(new GenericUDFOPMinus(), "float", "float", "float");
     verifyReturnType(new GenericUDFOPMinus(), "float", "double", "double");
-    verifyReturnType(new GenericUDFOPMinus(), "float", "decimal(10,2)", "double");
+    verifyReturnType(new GenericUDFOPMinus(), "float", "decimal(10,2)", "float");
 
     verifyReturnType(new GenericUDFOPMinus(), "double", "double", "double");
     verifyReturnType(new GenericUDFOPMinus(), "double", "decimal(10,2)", "double");

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java
index 696682f..e342a76 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPMultiply.java
@@ -215,7 +215,7 @@ public class TestGenericUDFOPMultiply extends AbstractTestGenericUDFOPNumeric {
 
     verifyReturnType(new GenericUDFOPMultiply(), "float", "float", "float");
     verifyReturnType(new GenericUDFOPMultiply(), "float", "double", "double");
-    verifyReturnType(new GenericUDFOPMultiply(), "float", "decimal(10,2)", "double");
+    verifyReturnType(new GenericUDFOPMultiply(), "float", "decimal(10,2)", "float");
 
     verifyReturnType(new GenericUDFOPMultiply(), "double", "double", "double");
     verifyReturnType(new GenericUDFOPMultiply(), "double", "decimal(10,2)", "double");
@@ -237,7 +237,7 @@ public class TestGenericUDFOPMultiply extends AbstractTestGenericUDFOPNumeric {
 
     verifyReturnType(new GenericUDFOPMultiply(), "float", "float", "float");
     verifyReturnType(new GenericUDFOPMultiply(), "float", "double", "double");
-    verifyReturnType(new GenericUDFOPMultiply(), "float", "decimal(10,2)", "double");
+    verifyReturnType(new GenericUDFOPMultiply(), "float", "decimal(10,2)", "float");
 
     verifyReturnType(new GenericUDFOPMultiply(), "double", "double", "double");
     verifyReturnType(new GenericUDFOPMultiply(), "double", "decimal(10,2)", "double");

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java
index eba4894..b49f6ef 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPPlus.java
@@ -230,7 +230,7 @@ public class TestGenericUDFOPPlus extends AbstractTestGenericUDFOPNumeric {
 
     verifyReturnType(new GenericUDFOPPlus(), "float", "float", "float");
     verifyReturnType(new GenericUDFOPPlus(), "float", "double", "double");
-    verifyReturnType(new GenericUDFOPPlus(), "float", "decimal(10,2)", "double");
+    verifyReturnType(new GenericUDFOPPlus(), "float", "decimal(10,2)", "float");
 
     verifyReturnType(new GenericUDFOPPlus(), "double", "double", "double");
     verifyReturnType(new GenericUDFOPPlus(), "double", "decimal(10,2)", "double");
@@ -252,7 +252,7 @@ public class TestGenericUDFOPPlus extends AbstractTestGenericUDFOPNumeric {
 
     verifyReturnType(new GenericUDFOPPlus(), "float", "float", "float");
     verifyReturnType(new GenericUDFOPPlus(), "float", "double", "double");
-    verifyReturnType(new GenericUDFOPPlus(), "float", "decimal(10,2)", "double");
+    verifyReturnType(new GenericUDFOPPlus(), "float", "decimal(10,2)", "float");
 
     verifyReturnType(new GenericUDFOPPlus(), "double", "double", "double");
     verifyReturnType(new GenericUDFOPPlus(), "double", "decimal(10,2)", "double");

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/queries/clientpositive/alter_partition_change_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_partition_change_col.q b/ql/src/test/queries/clientpositive/alter_partition_change_col.q
index 6861ca2..360f4d2 100644
--- a/ql/src/test/queries/clientpositive/alter_partition_change_col.q
+++ b/ql/src/test/queries/clientpositive/alter_partition_change_col.q
@@ -1,3 +1,4 @@
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 SET hive.exec.dynamic.partition = true;
 SET hive.exec.dynamic.partition.mode = nonstrict;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/queries/clientpositive/alter_table_cascade.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/alter_table_cascade.q b/ql/src/test/queries/clientpositive/alter_table_cascade.q
index 479fda4..acca4e8 100644
--- a/ql/src/test/queries/clientpositive/alter_table_cascade.q
+++ b/ql/src/test/queries/clientpositive/alter_table_cascade.q
@@ -1,3 +1,4 @@
+set hive.metastore.disallow.incompatible.col.type.changes=false;
 SET hive.exec.dynamic.partition = true;
 SET hive.exec.dynamic.partition.mode = nonstrict;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/results/clientpositive/perf/query32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query32.q.out b/ql/src/test/results/clientpositive/perf/query32.q.out
index f9cfd69..7cfda2f 100644
--- a/ql/src/test/results/clientpositive/perf/query32.q.out
+++ b/ql/src/test/results/clientpositive/perf/query32.q.out
@@ -60,7 +60,7 @@ Stage-0
               Select Operator [SEL_32] (rows=169400 width=1436)
                 Output:["_col1"]
                 Filter Operator [FIL_31] (rows=169400 width=1436)
-                  predicate:(_col1 > CAST( _col5 AS decimal(20,15)))
+                  predicate:(UDFToDouble(_col1) > _col5)
                   Merge Join Operator [MERGEJOIN_59] (rows=508200 width=1436)
                     Conds:RS_27._col0=RS_28._col0(Inner),RS_28._col0=RS_29._col0(Inner),Output:["_col1","_col5"]
                   <-Map 6 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/results/clientpositive/perf/query65.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query65.q.out b/ql/src/test/results/clientpositive/perf/query65.q.out
index 9673373..15b2615 100644
--- a/ql/src/test/results/clientpositive/perf/query65.q.out
+++ b/ql/src/test/results/clientpositive/perf/query65.q.out
@@ -102,7 +102,7 @@ Stage-0
               Select Operator [SEL_49] (rows=372680 width=1436)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                 Filter Operator [FIL_48] (rows=372680 width=1436)
-                  predicate:(_col11 <= CAST( (0.1 * UDFToDouble(_col8)) AS decimal(30,15)))
+                  predicate:(UDFToDouble(_col11) <= (0.1 * UDFToDouble(_col8)))
                   Merge Join Operator [MERGEJOIN_73] (rows=1118040 width=1436)
                     Conds:RS_45._col7, _col0, _col2=RS_46._col0, _col0, _col1(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col8","_col11"]
                   <-Reducer 13 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/results/clientpositive/perf/query75.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query75.q.out b/ql/src/test/results/clientpositive/perf/query75.q.out
index 35729a2..15c46c2 100644
--- a/ql/src/test/results/clientpositive/perf/query75.q.out
+++ b/ql/src/test/results/clientpositive/perf/query75.q.out
@@ -43,7 +43,7 @@ Stage-0
               Select Operator [SEL_152] (rows=169103 width=1436)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
                 Filter Operator [FIL_151] (rows=169103 width=1436)
-                  predicate:((CAST( _col5 AS decimal(17,2)) / CAST( _col12 AS decimal(17,2))) < 0.9)
+                  predicate:(UDFToDouble((CAST( _col5 AS decimal(17,2)) / CAST( _col12 AS decimal(17,2)))) < 0.9)
                   Merge Join Operator [MERGEJOIN_259] (rows=507310 width=1436)
                     Conds:RS_148._col1, _col2, _col3, _col4=RS_149._col1, _col2, _col3, _col4(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col12","_col13"]
                   <-Reducer 31 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/results/clientpositive/perf/query89.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query89.q.out b/ql/src/test/results/clientpositive/perf/query89.q.out
index 165d829..75f7385 100644
--- a/ql/src/test/results/clientpositive/perf/query89.q.out
+++ b/ql/src/test/results/clientpositive/perf/query89.q.out
@@ -70,14 +70,14 @@ Stage-0
       File Output Operator [FS_36]
         Limit [LIM_35] (rows=100 width=1436)
           Number of rows:100
-          Select Operator [SEL_34] (rows=76865 width=1436)
+          Select Operator [SEL_34] (rows=51243 width=1436)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
           <-Reducer 6 [SIMPLE_EDGE]
             SHUFFLE [RS_33]
-              Select Operator [SEL_30] (rows=76865 width=1436)
+              Select Operator [SEL_30] (rows=51243 width=1436)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                Filter Operator [FIL_46] (rows=76865 width=1436)
-                  predicate:CASE WHEN ((avg_window_0 <> 0)) THEN (((abs((_col6 - avg_window_0)) / avg_window_0) > 0.1)) ELSE (null) END
+                Filter Operator [FIL_46] (rows=51243 width=1436)
+                  predicate:(UDFToDouble(CASE WHEN ((avg_window_0 <> 0)) THEN ((abs((_col6 - avg_window_0)) / avg_window_0)) ELSE (null) END) > 0.1)
                   Select Operator [SEL_29] (rows=153730 width=1436)
                     Output:["avg_window_0","_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
                     PTF Operator [PTF_28] (rows=153730 width=1436)

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
index 1b21c99..e5e5b4b 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
@@ -41,7 +41,7 @@ STAGE PLANS:
                   alias: decimal_test
                   Statistics: Num rows: 12288 Data size: 2128368 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((cdecimal1 > 0) and (cdecimal1 < 12345.5678) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
+                    predicate: ((cdecimal1 > 0) and (UDFToDouble(cdecimal1) < 12345.5678) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
                     Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (2 * cdecimal2)) (type: decimal(26,14)), ((UDFToDouble(cdecimal1) + 2.34) / UDFToDouble(cdecimal2)) (type: double), (UDFToDouble(cdecimal1) * (UDFToDouble(cdecimal2) / 3.4)) (type: double), (cdecimal1 % 10) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/results/clientpositive/udf_greatest.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_greatest.q.out b/ql/src/test/results/clientpositive/udf_greatest.q.out
index 47cfb3f..7c7e67a 100644
--- a/ql/src/test/results/clientpositive/udf_greatest.q.out
+++ b/ql/src/test/results/clientpositive/udf_greatest.q.out
@@ -183,7 +183,7 @@ FROM src tablesample (1 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-1.1	1.1	1	NULL
+1.100000023841858	1.1	1.0	NULL
 PREHOOK: query: SELECT GREATEST(-100Y, -80S, -60, -40L, cast(-20 as float), cast(0 as double), cast(0.5 as decimal)),
        GREATEST(100Y, 80S, 60, 40L, cast(20 as float), cast(0 as double), cast(-0.5 as decimal)),
        GREATEST(100Y, 80S, 60, 40L, null, cast(0 as double), cast(-0.5 as decimal))
@@ -198,7 +198,7 @@ FROM src tablesample (1 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-1	100	NULL
+1.0	100.0	NULL
 PREHOOK: query: SELECT GREATEST(10L, 'a', date('2001-01-28'))
 FROM src tablesample (1 rows)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/results/clientpositive/udf_least.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_least.q.out b/ql/src/test/results/clientpositive/udf_least.q.out
index 2363abe..497370e 100644
--- a/ql/src/test/results/clientpositive/udf_least.q.out
+++ b/ql/src/test/results/clientpositive/udf_least.q.out
@@ -183,7 +183,7 @@ FROM src tablesample (1 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
--1.1	-1.1	-0.1	NULL
+-1.1	-1.100000023841858	-0.1	NULL
 PREHOOK: query: SELECT LEAST(-100Y, -80S, -60, -40L, cast(-20 as float), cast(0 as double), cast(0.5 as decimal)),
        LEAST(100Y, 80S, 60, 40L, cast(20 as float), cast(0 as double), cast(-0.5 as decimal)),
        LEAST(100Y, 80S, 60, 40L, null, cast(0 as double), cast(-0.5 as decimal))
@@ -198,7 +198,7 @@ FROM src tablesample (1 rows)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
--100	-1	NULL
+-100.0	-1.0	NULL
 PREHOOK: query: SELECT LEAST(10L, 'a', date('2001-01-28'))
 FROM src tablesample (1 rows)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index 03f6f35..9244efd 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -35,7 +35,7 @@ STAGE PLANS:
             alias: decimal_test
             Statistics: Num rows: 12288 Data size: 2128368 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((cdecimal1 > 0) and (cdecimal1 < 12345.5678) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
+              predicate: ((cdecimal1 > 0) and (UDFToDouble(cdecimal1) < 12345.5678) and (cdecimal2 <> 0) and (cdecimal2 > 1000) and cdouble is not null) (type: boolean)
               Statistics: Num rows: 455 Data size: 78809 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (2 * cdecimal2)) (type: decimal(26,14)), ((UDFToDouble(cdecimal1) + 2.34) / UDFToDouble(cdecimal2)) (type: double), (UDFToDouble(cdecimal1) * (UDFToDouble(cdecimal2) / 3.4)) (type: double), (cdecimal1 % 10) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)

http://git-wip-us.apache.org/repos/asf/hive/blob/b507520e/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
index 16daecf..abd2838 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfoUtils.java
@@ -68,9 +68,9 @@ public final class TypeInfoUtils {
     registerNumericType(PrimitiveCategory.SHORT, 2);
     registerNumericType(PrimitiveCategory.INT, 3);
     registerNumericType(PrimitiveCategory.LONG, 4);
-    registerNumericType(PrimitiveCategory.FLOAT, 5);
-    registerNumericType(PrimitiveCategory.DOUBLE, 6);
-    registerNumericType(PrimitiveCategory.DECIMAL, 7);
+    registerNumericType(PrimitiveCategory.DECIMAL, 5);
+    registerNumericType(PrimitiveCategory.FLOAT, 6);
+    registerNumericType(PrimitiveCategory.DOUBLE, 7);
     registerNumericType(PrimitiveCategory.STRING, 8);
   }
 
@@ -885,10 +885,7 @@ public final class TypeInfoUtils {
     if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DOUBLE) {
       return true;
     }
-    // Allow implicit String to Decimal conversion
-    if (fromPg == PrimitiveGrouping.STRING_GROUP && to == PrimitiveCategory.DECIMAL) {
-      return true;
-    }
+
     // Void can be converted to any type
     if (from == PrimitiveCategory.VOID) {
       return true;
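
This reordering is what drives the q.out changes earlier in this commit: with DECIMAL now registered below FLOAT and DOUBLE, a comparison between a decimal column and a double literal resolves to double (hence the new UDFToDouble(cdecimal1) cast), and LEAST over a mix of decimal, float and double values now returns a double instead of a decimal. A minimal standalone sketch of that precedence lookup follows; the class and method names are illustrative rather than Hive APIs, and only the category/precedence pairs come from the diff above.

import java.util.HashMap;
import java.util.Map;

// Standalone illustration of precedence-driven numeric type resolution.
// The ordering mirrors the registerNumericType() calls in the new code.
public class NumericPrecedenceSketch {
  private static final Map<String, Integer> PRECEDENCE = new HashMap<>();
  static {
    PRECEDENCE.put("BYTE", 1);
    PRECEDENCE.put("SHORT", 2);
    PRECEDENCE.put("INT", 3);
    PRECEDENCE.put("LONG", 4);
    PRECEDENCE.put("DECIMAL", 5); // moved below FLOAT/DOUBLE by this change
    PRECEDENCE.put("FLOAT", 6);
    PRECEDENCE.put("DOUBLE", 7);
    PRECEDENCE.put("STRING", 8);
  }

  // The common type of two numeric categories is the one with the higher
  // precedence value, since both operands can be widened to it.
  static String commonType(String a, String b) {
    return PRECEDENCE.get(a) >= PRECEDENCE.get(b) ? a : b;
  }

  public static void main(String[] args) {
    System.out.println(commonType("DECIMAL", "DOUBLE")); // DOUBLE (was DECIMAL before the reorder)
    System.out.println(commonType("INT", "DECIMAL"));    // DECIMAL
  }
}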


[50/58] [abbrv] hive git commit: HIVE-13476: HS2 ShutdownHookManager holds an extra Driver instance in nested compile

Posted by jd...@apache.org.
HIVE-13476: HS2 ShutdownHookManager holds an extra Driver instance in nested compile


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2ba31f9e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2ba31f9e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2ba31f9e

Branch: refs/heads/llap
Commit: 2ba31f9e1699a9ce40b5fb3724ee3e4ba2c4f8c4
Parents: d567773
Author: Daniel Dai <da...@hortonworks.com>
Authored: Thu Apr 14 15:55:33 2016 -0700
Committer: Daniel Dai <da...@hortonworks.com>
Committed: Thu Apr 14 15:55:33 2016 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2ba31f9e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
index 95b7755..338c185 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/IndexUtils.java
@@ -238,6 +238,8 @@ public final class IndexUtils {
     indexMetaChangeTsk.setWork(indexMetaChange);
     rootTask.addDependentTask(indexMetaChangeTsk);
 
+    driver.destroy();
+
     return rootTask;
   }
 

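A hedged sketch of the pattern behind this one-line fix: a Driver created for a nested compile registers a shutdown hook, and that hook keeps the instance reachable until destroy() is called. The committed change simply invokes driver.destroy() before returning; the wrapper method, command string, and try/finally below are illustrative, while Driver.compile(), getPlan() and destroy() are the real APIs.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Driver;

// Illustrative nested-compile pattern (not the IndexUtils code itself).
public class NestedCompileSketch {
  static void compileNested(HiveConf conf, String command) {
    Driver driver = new Driver(conf);
    try {
      driver.compile(command);  // nested compile inside an outer query's compilation
      // ... hand driver.getPlan() (or its root tasks) back to the caller ...
    } finally {
      driver.destroy();         // drops the ShutdownHookManager reference to this Driver
    }
  }
}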

[55/58] [abbrv] hive git commit: HIVE-13475: Allow aggregate functions in over clause (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13475: Allow aggregate functions in over clause (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6a776f59
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6a776f59
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6a776f59

Branch: refs/heads/llap
Commit: 6a776f5998b1fc41c602b135c9e1ef04171f4b74
Parents: b30fe72
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Apr 15 12:59:39 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Apr 15 13:24:50 2016 +0100

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   5 +
 .../queries/clientpositive/windowing_gby2.q     |  41 ++
 .../results/clientpositive/windowing_gby2.q.out | 652 +++++++++++++++++++
 3 files changed, 698 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/6a776f59/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d3e7040..329c617 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -566,7 +566,12 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       assert (expressionTree.getChildCount() != 0);
       if (expressionTree.getChild(expressionTree.getChildCount()-1).getType()
           == HiveParser.TOK_WINDOWSPEC) {
+        // If it is a windowing spec, we include it in the list
+        // Further, we will examine its children AST nodes to check whether
+        // there are aggregation functions within
         wdwFns.add(expressionTree);
+        doPhase1GetAllAggregations((ASTNode) expressionTree.getChild(expressionTree.getChildCount()-1),
+                aggregations, wdwFns);
         return;
       }
       if (expressionTree.getChild(0).getType() == HiveParser.Identifier) {

http://git-wip-us.apache.org/repos/asf/hive/blob/6a776f59/ql/src/test/queries/clientpositive/windowing_gby2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/windowing_gby2.q b/ql/src/test/queries/clientpositive/windowing_gby2.q
new file mode 100644
index 0000000..920f723
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/windowing_gby2.q
@@ -0,0 +1,41 @@
+set hive.mapred.mode=nonstrict;
+
+explain
+select rank() over (order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by ws.key;
+
+select rank() over (order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by ws.key;
+
+explain
+select avg(cast(ws.key as int)) over (partition by min(ws.value) order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by cast(ws.key as int);
+
+select avg(cast(ws.key as int)) over (partition by min(ws.value) order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by cast(ws.key as int);
+
+explain
+select rank () over(partition by key order by sum(c_int - c_float) desc) ,
+dense_rank () over(partition by lower(value) order by sum(c_float/c_int) asc),
+percent_rank () over(partition by max(c_int) order by sum((c_float/c_int) - c_int) asc)
+from cbo_t3
+group by key, value;
+
+select rank () over(partition by key order by sum(c_int - c_float) desc) ,
+dense_rank () over(partition by lower(value) order by sum(c_float/c_int) asc),
+percent_rank () over(partition by max(c_int) order by sum((c_float/c_int) - c_int) asc)
+from cbo_t3
+group by key, value;
+
+explain
+select rank() over (order by sum(wr.cint)/sum(ws.c_int)) as return_rank
+from cbo_t3 ws join alltypesorc wr on ws.value = wr.cstring1
+group by ws.c_boolean;
+
+select rank() over (order by sum(wr.cint)/sum(ws.c_int)) as return_rank
+from cbo_t3 ws join alltypesorc wr on ws.value = wr.cstring1
+group by ws.c_boolean;

http://git-wip-us.apache.org/repos/asf/hive/blob/6a776f59/ql/src/test/results/clientpositive/windowing_gby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_gby2.q.out b/ql/src/test/results/clientpositive/windowing_gby2.q.out
new file mode 100644
index 0000000..4bd6994
--- /dev/null
+++ b/ql/src/test/results/clientpositive/windowing_gby2.q.out
@@ -0,0 +1,652 @@
+PREHOOK: query: explain
+select rank() over (order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by ws.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select rank() over (order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by ws.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: ws
+            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), c_int (type: int)
+              outputColumnNames: key, c_int
+              Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: sum(c_int)
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: bigint)
+            outputColumnNames: _col1
+            Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: 0 (type: int), _col1 (type: bigint)
+              sort order: ++
+              Map-reduce partition columns: 0 (type: int)
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey1 (type: bigint)
+          outputColumnNames: _col1
+          Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+          PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col1: bigint
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: _col1 ASC NULLS FIRST
+                  partition by: 0
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: rank_window_0
+                        arguments: _col1
+                        name: rank
+                        window function: GenericUDAFRankEvaluator
+                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                        isPivotResult: true
+            Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: rank_window_0 (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select rank() over (order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by ws.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select rank() over (order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by ws.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1
+2
+2
+2
+5
+5
+7
+PREHOOK: query: explain
+select avg(cast(ws.key as int)) over (partition by min(ws.value) order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by cast(ws.key as int)
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select avg(cast(ws.key as int)) over (partition by min(ws.value) order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by cast(ws.key as int)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: ws
+            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: UDFToInteger(key) (type: int), value (type: string), c_int (type: int)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: min(_col1), sum(_col2)
+                keys: _col0 (type: int)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: string), _col2 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0), sum(VALUE._col1)
+          keys: KEY._col0 (type: int)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col1 (type: string), _col2 (type: bigint)
+              sort order: ++
+              Map-reduce partition columns: _col1 (type: string)
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: int)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: bigint)
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+          PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col0: int, _col1: string, _col2: bigint
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: _col2 ASC NULLS FIRST
+                  partition by: _col1
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: avg_window_0
+                        arguments: _col0
+                        name: avg
+                        window function: GenericUDAFAverageEvaluatorDouble
+                        window frame: PRECEDING(MAX)~
+            Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: avg_window_0 (type: double)
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select avg(cast(ws.key as int)) over (partition by min(ws.value) order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by cast(ws.key as int)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select avg(cast(ws.key as int)) over (partition by min(ws.value) order by sum(ws.c_int)) as return_rank
+from cbo_t3 ws
+group by cast(ws.key as int)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+NULL
+1.0
+2.0
+3.0
+PREHOOK: query: explain
+select rank () over(partition by key order by sum(c_int - c_float) desc) ,
+dense_rank () over(partition by lower(value) order by sum(c_float/c_int) asc),
+percent_rank () over(partition by max(c_int) order by sum((c_float/c_int) - c_int) asc)
+from cbo_t3
+group by key, value
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select rank () over(partition by key order by sum(c_int - c_float) desc) ,
+dense_rank () over(partition by lower(value) order by sum(c_float/c_int) asc),
+percent_rank () over(partition by max(c_int) order by sum((c_float/c_int) - c_int) asc)
+from cbo_t3
+group by key, value
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-3 depends on stages: Stage-2
+  Stage-4 depends on stages: Stage-3
+  Stage-0 depends on stages: Stage-4
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: cbo_t3
+            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string), (UDFToFloat(c_int) - c_float) (type: float), (UDFToDouble(c_float) / UDFToDouble(c_int)) (type: double), c_int (type: int), ((UDFToDouble(c_float) / UDFToDouble(c_int)) - UDFToDouble(c_int)) (type: double)
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+              Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: sum(_col2), sum(_col3), max(_col4), sum(_col5)
+                keys: _col0 (type: string), _col1 (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col2 (type: double), _col3 (type: double), _col4 (type: int), _col5 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0), sum(VALUE._col1), max(VALUE._col2), sum(VALUE._col3)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: string), _col2 (type: double)
+              sort order: +-
+              Map-reduce partition columns: _col0 (type: string)
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: string), _col3 (type: double), _col4 (type: int), _col5 (type: double)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey1 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: int), VALUE._col3 (type: double)
+          outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+          Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+          PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col0: string, _col1: string, _col2: double, _col3: double, _col4: int, _col5: double
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: _col2 DESC NULLS LAST
+                  partition by: _col0
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: rank_window_0
+                        arguments: _col2
+                        name: rank
+                        window function: GenericUDAFRankEvaluator
+                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                        isPivotResult: true
+            Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col1 (type: string), _col3 (type: double), _col4 (type: int), _col5 (type: double), rank_window_0 (type: int)
+              outputColumnNames: _col1, _col3, _col4, _col5, rank_window_0
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: lower(_col1) (type: string), _col3 (type: double)
+              sort order: ++
+              Map-reduce partition columns: lower(_col1) (type: string)
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              value expressions: rank_window_0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: double)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: int), VALUE._col2 (type: string), KEY.reducesinkkey1 (type: double), VALUE._col4 (type: int), VALUE._col5 (type: double)
+          outputColumnNames: _col0, _col2, _col4, _col5, _col6
+          Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+          PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col0: int, _col2: string, _col4: double, _col5: int, _col6: double
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: _col4 ASC NULLS FIRST
+                  partition by: lower(_col2)
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: dense_rank_window_1
+                        arguments: _col4
+                        name: dense_rank
+                        window function: GenericUDAFDenseRankEvaluator
+                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                        isPivotResult: true
+            Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: int), _col5 (type: int), _col6 (type: double), dense_rank_window_1 (type: int)
+              outputColumnNames: _col0, _col5, _col6, dense_rank_window_1
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-4
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col5 (type: int), _col6 (type: double)
+              sort order: ++
+              Map-reduce partition columns: _col5 (type: int)
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              value expressions: dense_rank_window_1 (type: int), _col0 (type: int)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: int), VALUE._col1 (type: int), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: double)
+          outputColumnNames: _col0, _col1, _col6, _col7
+          Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+          PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col0: int, _col1: int, _col6: int, _col7: double
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: _col7 ASC NULLS FIRST
+                  partition by: _col6
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: percent_rank_window_2
+                        arguments: _col7
+                        name: percent_rank
+                        window function: GenericUDAFPercentRankEvaluator
+                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                        isPivotResult: true
+            Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col1 (type: int), _col0 (type: int), percent_rank_window_2 (type: double)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 10 Data size: 131 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select rank () over(partition by key order by sum(c_int - c_float) desc) ,
+dense_rank () over(partition by lower(value) order by sum(c_float/c_int) asc),
+percent_rank () over(partition by max(c_int) order by sum((c_float/c_int) - c_int) asc)
+from cbo_t3
+group by key, value
+PREHOOK: type: QUERY
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select rank () over(partition by key order by sum(c_int - c_float) desc) ,
+dense_rank () over(partition by lower(value) order by sum(c_float/c_int) asc),
+percent_rank () over(partition by max(c_int) order by sum((c_float/c_int) - c_int) asc)
+from cbo_t3
+group by key, value
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+1	1	0.0
+1	1	0.0
+1	1	0.0
+1	1	0.0
+1	1	0.0
+1	1	0.0
+1	1	0.0
+PREHOOK: query: explain
+select rank() over (order by sum(wr.cint)/sum(ws.c_int)) as return_rank
+from cbo_t3 ws join alltypesorc wr on ws.value = wr.cstring1
+group by ws.c_boolean
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select rank() over (order by sum(wr.cint)/sum(ws.c_int)) as return_rank
+from cbo_t3 ws join alltypesorc wr on ws.value = wr.cstring1
+group by ws.c_boolean
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-3 depends on stages: Stage-2
+  Stage-0 depends on stages: Stage-3
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: ws
+            Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: value is not null (type: boolean)
+              Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: value (type: string), c_int (type: int), c_boolean (type: boolean)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: int), _col2 (type: boolean)
+          TableScan
+            alias: wr
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: cstring1 is not null (type: boolean)
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: cint (type: int), cstring1 (type: string)
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col1 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: string)
+                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int)
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col0 (type: string)
+            1 _col1 (type: string)
+          outputColumnNames: _col1, _col2, _col3
+          Statistics: Num rows: 13516 Data size: 2906160 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col2 (type: boolean), _col3 (type: int), _col1 (type: int)
+            outputColumnNames: _col2, _col3, _col1
+            Statistics: Num rows: 13516 Data size: 2906160 Basic stats: COMPLETE Column stats: NONE
+            Group By Operator
+              aggregations: sum(_col3), sum(_col1)
+              keys: _col2 (type: boolean)
+              mode: hash
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 13516 Data size: 2906160 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: _col0 (type: boolean)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: boolean)
+              Statistics: Num rows: 13516 Data size: 2906160 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: bigint), _col2 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0), sum(VALUE._col1)
+          keys: KEY._col0 (type: boolean)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col1 (type: bigint), _col2 (type: bigint)
+            outputColumnNames: _col1, _col2
+            Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              key expressions: 0 (type: int), (UDFToDouble(_col1) / UDFToDouble(_col2)) (type: double)
+              sort order: ++
+              Map-reduce partition columns: 0 (type: int)
+              Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: bigint), _col2 (type: bigint)
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col1 (type: bigint), VALUE._col2 (type: bigint)
+          outputColumnNames: _col1, _col2
+          Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+          PTF Operator
+            Function definitions:
+                Input definition
+                  input alias: ptf_0
+                  output shape: _col1: bigint, _col2: bigint
+                  type: WINDOWING
+                Windowing table definition
+                  input alias: ptf_1
+                  name: windowingtablefunction
+                  order by: (UDFToDouble(_col1) / UDFToDouble(_col2)) ASC NULLS FIRST
+                  partition by: 0
+                  raw input shape:
+                  window functions:
+                      window function definition
+                        alias: rank_window_0
+                        arguments: (UDFToDouble(_col1) / UDFToDouble(_col2))
+                        name: rank
+                        window function: GenericUDAFRankEvaluator
+                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
+                        isPivotResult: true
+            Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: rank_window_0 (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select rank() over (order by sum(wr.cint)/sum(ws.c_int)) as return_rank
+from cbo_t3 ws join alltypesorc wr on ws.value = wr.cstring1
+group by ws.c_boolean
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####
+POSTHOOK: query: select rank() over (order by sum(wr.cint)/sum(ws.c_int)) as return_rank
+from cbo_t3 ws join alltypesorc wr on ws.value = wr.cstring1
+group by ws.c_boolean
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Input: default@cbo_t3
+#### A masked pattern was here ####


[42/58] [abbrv] hive git commit: HIVE-13496. Create initial test data once across multiple test runs - TestCliDriver. (Siddharth Seth, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13496. Create initial test data once across multiple test runs - TestCliDriver. (Siddharth Seth, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/976e628f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/976e628f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/976e628f

Branch: refs/heads/llap
Commit: 976e628fc01911936caa19e61ea3342f3a19455a
Parents: 0dd4621
Author: Siddharth Seth <ss...@apache.org>
Authored: Thu Apr 14 10:25:53 2016 -0700
Committer: Siddharth Seth <ss...@apache.org>
Committed: Thu Apr 14 10:25:53 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/QTestUtil.java    | 247 +++++++++++++++++--
 pom.xml                                         |   2 +
 ql/src/test/templates/TestCliDriver.vm          |  38 ++-
 3 files changed, 265 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/976e628f/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 2f109ab..79646cd 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -51,7 +51,6 @@ import java.util.Collection;
 import java.util.Comparator;
 import java.util.Deque;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -63,6 +62,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.StringUtils;
@@ -84,9 +84,7 @@ import org.apache.hadoop.hive.common.io.SortPrintStream;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.LlapItUtils;
-import org.apache.hadoop.hive.llap.configuration.LlapDaemonConfiguration;
 import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
-import org.apache.hadoop.hive.llap.daemon.impl.LlapDaemon;
 import org.apache.hadoop.hive.llap.io.api.LlapProxy;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Index;
@@ -99,6 +97,7 @@ import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
 import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
@@ -139,6 +138,8 @@ public class QTestUtil {
   // security property names
   private static final String SECURITY_KEY_PROVIDER_URI_NAME = "dfs.encryption.key.provider.uri";
   private static final String CRLF = System.getProperty("line.separator");
+  private static final String TEST_BUILD_DIR = System.getProperty("test.build.dir");
+  private static final String CACHED_DATA_DIR_NAME = "cachedData";
 
   private static final Logger LOG = LoggerFactory.getLogger("QTestUtil");
   private static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
@@ -183,6 +184,16 @@ public class QTestUtil {
   private final String cleanupScript;
   private boolean useHBaseMetastore = false;
 
+  // Parameters that help track cached data generation.
+  private final String driverName;
+  private Path cachedDataPath;
+  private String metaStorePathString;
+  private Path metaStorePath;
+  private FileSystem localFs;
+  private boolean attemptingCacheUsage;
+
+  private boolean dbEtcSetup = false;
+
   public interface SuiteAddTestFunctor {
     public void addTestToSuite(TestSuite suite, Object setup, String tName);
   }
@@ -378,11 +389,34 @@ public class QTestUtil {
   }
 
   public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
+                   String confDir, String hadoopVer, String initScript, String cleanupScript,
+                   boolean useHBaseMetastore, boolean withLlapIo) throws Exception {
+    // For now, to avoid changing multiple test templates, a null driver name avoids
+    // data generation optimizations.
+    this(outDir, logDir, clusterType, confDir, hadoopVer, initScript, cleanupScript,
+        useHBaseMetastore, withLlapIo, null);
+  }
+
+  public QTestUtil(String outDir, String logDir, MiniClusterType clusterType,
       String confDir, String hadoopVer, String initScript, String cleanupScript,
-      boolean useHBaseMetastore, boolean withLlapIo)
+      boolean useHBaseMetastore, boolean withLlapIo, String driverName)
     throws Exception {
+    this.attemptingCacheUsage = (StringUtils.isEmpty(TEST_BUILD_DIR) ||
+        StringUtils.isEmpty(driverName) || useHBaseMetastore) ? false : true;
+    this.driverName = driverName;
     this.outDir = outDir;
     this.logDir = logDir;
+    LOG.info("Creating QTestUtil with settings: "
+        + "driverName=" + driverName
+        + ", attemptingCacheUsage=" + attemptingCacheUsage
+        + ", test.build.dir=" + System.getProperty("test.build.dir")
+        + ", useHbaseMetaStore=" + useHBaseMetastore
+        + ", withLlapIo=" + withLlapIo
+        + ", confDir=" + confDir
+        + ", outDir=" + outDir
+        + ", logDir=" + logDir
+        + ", initScript=" + initScript
+        + ", cleanupScript=" + cleanupScript);
     this.useHBaseMetastore = useHBaseMetastore;
 
     if (confDir != null && !confDir.isEmpty()) {
@@ -471,6 +505,7 @@ public class QTestUtil {
     if (scriptsDir == null) {
       scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
     }
+    LOG.info("Using DataDir=" + dataDir + ", ScriptsDir=" + scriptsDir);
 
     this.initScript = scriptsDir + File.separator + initScript;
     this.cleanupScript = scriptsDir + File.separator + cleanupScript;
@@ -832,6 +867,17 @@ public class QTestUtil {
       return;
     }
 
+    if (!attemptingCacheUsage) {
+      cleanupNonCacheUsage();
+    } else {
+      cleanupCacheUsage();
+    }
+
+    FunctionRegistry.unregisterTemporaryUDF("test_udaf");
+    FunctionRegistry.unregisterTemporaryUDF("test_error");
+  }
+
+  private void cleanupNonCacheUsage() throws Exception {
     clearTablesCreatedDuringTests();
     clearKeysCreatedInTests();
 
@@ -849,21 +895,42 @@ public class QTestUtil {
       LOG.info("No cleanup script detected. Skipping.");
     }
 
+    cleanupWarehouseDir();
+  }
+
+  private void cleanupCacheUsage() throws IOException {
+    // Remove the Warehouse and metastore directories completely.
+    // Also close the current db, since files are going to come in to replace it soon.
+    Preconditions.checkState(attemptingCacheUsage);
+    Preconditions.checkNotNull(metaStorePath);
+    Preconditions.checkNotNull(localFs);
+    Hive.closeCurrent();
+    cleanupMetastoreDir();
+    cleanupWarehouseDir();
+  }
+
+  private void cleanupWarehouseDir() throws IOException {
     // delete any contents in the warehouse dir
     Path p = new Path(testWarehouse);
     FileSystem fs = p.getFileSystem(conf);
 
     try {
-      FileStatus [] ls = fs.listStatus(p);
-      for (int i=0; (ls != null) && (i<ls.length); i++) {
+      FileStatus[] ls = fs.listStatus(p);
+      for (int i = 0; (ls != null) && (i < ls.length); i++) {
         fs.delete(ls[i].getPath(), true);
       }
     } catch (FileNotFoundException e) {
       // Best effort
     }
+  }
 
-    FunctionRegistry.unregisterTemporaryUDF("test_udaf");
-    FunctionRegistry.unregisterTemporaryUDF("test_error");
+  private void cleanupMetastoreDir() throws IOException {
+    try {
+      LOG.info("Cleaning up metastore Dir: {}", metaStorePath);
+      localFs.delete(metaStorePath, true);
+    } catch (FileNotFoundException e) {
+      // Best effort
+    }
   }
 
   protected void runCreateTableCmd(String createTableCmd) throws Exception {
@@ -893,6 +960,10 @@ public class QTestUtil {
   }
 
   public void createSources(String tname) throws Exception {
+    createSources(tname, false);
+  }
+
+  public void createSources(String tname, boolean forceCreate) throws Exception {
     boolean canReuseSession = (tname == null) || !qNoSessionReuseQuerySet.contains(tname);
     if(!isSessionStateStarted) {
       startSessionState(canReuseSession);
@@ -901,34 +972,173 @@ public class QTestUtil {
     if(cliDriver == null) {
       cliDriver = new CliDriver();
     }
-    cliDriver.processLine("set test.data.dir=" + testFiles + ";");
+
     File scriptFile = new File(this.initScript);
     if (!scriptFile.isFile()) {
       LOG.info("No init script detected. Skipping");
+      if (attemptingCacheUsage) {
+        setupDbsEtc(true, true);
+      }
       return;
     }
-    conf.setBoolean("hive.test.init.phase", true);
 
+    if (!attemptingCacheUsage || forceCreate) {
+      LOG.info("Creating sources without data caching. attemptingCacheUsage={}, forceCreate={}",
+          attemptingCacheUsage, forceCreate);
+      cliDriver.processLine("set test.data.dir=" + testFiles + ";");
+      conf.setBoolean("hive.test.init.phase", true);
+      createSourcesNonCached(scriptFile);
+    } else {
+      LOG.info("Creating sources with data caching");
+      createSourcesCached(scriptFile);
+    }
+
+    conf.setBoolean("hive.test.init.phase", false);
+  }
+
+  private void createSourcesNonCached(File scriptFile) throws IOException {
     String initCommands = readEntireFileIntoString(scriptFile);
     LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
 
     cliDriver.processLine(initCommands);
+  }
 
-    conf.setBoolean("hive.test.init.phase", false);
+  private void createSourcesCached(File scriptFile) throws IOException, HiveException {
+
+    // First check if the cache already exists. If it does just copy it over.
+    Path cachedWarehousePath = new Path(cachedDataPath, "warehouse");
+    Path cachedMetaStorePtah = new Path(cachedDataPath, "metastore");
+    if (localFs.exists(cachedDataPath)) {
+      if (localFs.exists(cachedWarehousePath) && localFs.exists(cachedMetaStorePtah)) {
+        LOG.info("Cached data found in {}. Attempting to use it", cachedDataPath);
+        // Data is already cached
+        // Copy the files over to where they should be
+        Path warehousePath = new Path(testWarehouse);
+        FileSystem warehouseFs = warehousePath.getFileSystem(conf);
+        try {
+          warehouseFs.delete(warehousePath, false);
+        } catch (FileNotFoundException e) {
+          // Does not matter if it does not exist.
+        }
+        warehouseFs.copyFromLocalFile(false, cachedWarehousePath, warehousePath);
+
+        try {
+          localFs.delete(metaStorePath, false);
+        } catch (IOException e) {
+          // Does not matter if it does not exist.
+        }
+        localFs.copyFromLocalFile(false, cachedMetaStorePtah, metaStorePath);
+        setupDbsEtc(true, false);
+        cliDriver.processLine("set test.data.dir=" + testFiles + ";");
+        conf.setBoolean("hive.test.init.phase", true);
+
+        return;
+      } else {
+        // Something is missing. Clean up, then re-generate and cache
+        LOG.info("Partial or no cached data found at {}. Cache will be created", cachedDataPath);
+        localFs.delete(cachedDataPath, true);
+      }
+    } else {
+      LOG.info("No cached data found at {}. Cache will be created", cachedDataPath);
+      // No caching. Re-generate the data and cache it.
+    }
+
+    // Generate and cache the data
+    setupDbsEtc(true, true);
+    cliDriver.processLine("set test.data.dir=" + testFiles + ";");
+    conf.setBoolean("hive.test.init.phase", true);
+    createSourcesNonCached(scriptFile);
+
+    // Close the DB so that contents can be copied out safely.
+    Hive.closeCurrent();
+
+    // Cache the sources
+    localFs.mkdirs(cachedDataPath);
+
+    Path warehousePath = new Path(testWarehouse);
+    FileSystem warehouseFs = warehousePath.getFileSystem(conf);
+
+    warehouseFs.copyToLocalFile(false, warehousePath, cachedWarehousePath, true);
+    localFs.copyToLocalFile(false, metaStorePath, cachedMetaStorePtah, true);
+
+    // Re-open the DB etc.
+    setupDbsEtc(true, false);
   }
 
-  public void init() throws Exception {
+  private static final Pattern metaStoreUriPattern =
+      Pattern.compile("derby.*?databaseName=(.*?)(;|$)");
 
+  private String getDerbyDbPath(String jdbcConnectString) {
+    if (StringUtils.isEmpty(jdbcConnectString)) {
+      return null;
+    }
+    Matcher matcher = metaStoreUriPattern.matcher(jdbcConnectString);
+    if (matcher.find()) {
+      return matcher.group(1);
+    } else {
+      return null;
+    }
+  }
+
+  public void init() throws Exception {
+    LOG.info("init");
     testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
+    LOG.info("TestWarehouseDir set to: [{}]", testWarehouse);
+    if (attemptingCacheUsage) {
+      // The derby path comes from METASTORECONNECTURLKEY. Default ends up being target/junit_metastore_db
+      String metaStoreConnectUrl = conf.getVar(ConfVars.METASTORECONNECTURLKEY);
+      LOG.info("MetastoreConnectUrl: " + metaStoreConnectUrl);
+      metaStorePathString = getDerbyDbPath(metaStoreConnectUrl);
+
+      if (metaStorePathString == null) {
+        LOG.warn(
+            "Disabling attempted cache usage since metastore path cannot be determined from {}",
+            metaStoreConnectUrl);
+        attemptingCacheUsage = false;
+      } else {
+        LOG.info("Metastore url path: " + metaStorePathString);
+        metaStorePath = new Path(metaStorePathString);
+        if (metaStorePath.isAbsolute() && metaStorePathString.split(File.separator).length >= 3) {
+          // Turn this on only if the path is absolute, and is at least 3 deep - since we'll be deleting files later.
+          localFs = FileSystem.getLocal(conf).getRaw();
+          assert(TEST_BUILD_DIR != null);
+          cachedDataPath = new Path(TEST_BUILD_DIR, CACHED_DATA_DIR_NAME);
+          cachedDataPath = new Path(cachedDataPath, driverName);
+          LOG.info("Using cachedDataPath: " + cachedDataPath);
+        } else {
+          LOG.warn(
+              "Disabling attempted cache usage since metastore path may not be absolute, or depth is < 3. MetaStorePath={}",
+              metaStorePathString);
+          metaStorePath = null;
+          attemptingCacheUsage = false;
+        }
+
+      }
+    }
     String execEngine = conf.get("hive.execution.engine");
     conf.set("hive.execution.engine", "mr");
     SessionState.start(conf);
     conf.set("hive.execution.engine", execEngine);
-    db = Hive.get(conf);
-    drv = new Driver(conf);
-    drv.init();
-    pd = new ParseDriver();
-    sem = new SemanticAnalyzer(conf);
+
+    if (!attemptingCacheUsage) {
+      setupDbsEtc(true, true);
+    }
+  }
+
+  private void setupDbsEtc(boolean force, boolean isNewDb) throws HiveException {
+    if (!dbEtcSetup || force) {
+      if (isNewDb) {
+        db = Hive.get(conf);
+      } else {
+        db = Hive.getWithFastCheck(conf, false);
+      }
+      LOG.info("Obtained db");
+      drv = new Driver(conf);
+      drv.init();
+      pd = new ParseDriver();
+      sem = new SemanticAnalyzer(conf);
+      dbEtcSetup = true;
+    }
   }
 
   public void init(String tname) throws Exception {
@@ -944,8 +1154,9 @@ public class QTestUtil {
   public String cliInit(String tname, boolean recreate) throws Exception {
     if (recreate) {
       cleanUp(tname);
-      createSources(tname);
+      createSources(tname, true);
     }
+    setupDbsEtc(false, true);
 
     HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
     "org.apache.hadoop.hive.ql.security.DummyAuthenticator");

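For reference, a small self-contained sketch of what the metaStoreUriPattern added above extracts; QTestUtil only keeps attemptingCacheUsage enabled when a database directory can be pulled out of METASTORECONNECTURLKEY. The class name and the sample JDBC URL are assumptions for illustration; the regular expression is the one from the diff.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Standalone illustration of the regex used by QTestUtil.getDerbyDbPath().
public class DerbyPathSketch {
  private static final Pattern METASTORE_URI_PATTERN =
      Pattern.compile("derby.*?databaseName=(.*?)(;|$)");

  static String derbyDbPath(String jdbcConnectString) {
    Matcher m = METASTORE_URI_PATTERN.matcher(jdbcConnectString);
    return m.find() ? m.group(1) : null; // null disables the cached-data path
  }

  public static void main(String[] args) {
    // Sample connect string of the usual test form (assumed, not taken from the diff):
    System.out.println(derbyDbPath(
        "jdbc:derby:;databaseName=/tmp/build/junit_metastore_db;create=true"));
    // prints: /tmp/build/junit_metastore_db
  }
}
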
http://git-wip-us.apache.org/repos/asf/hive/blob/976e628f/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 77cfaeb..08ef998 100644
--- a/pom.xml
+++ b/pom.xml
@@ -75,6 +75,7 @@
     <test.hive.hadoop.classpath>${maven.test.classpath}</test.hive.hadoop.classpath>
     <test.log4j.scheme>file://</test.log4j.scheme>
     <test.tmp.dir>${project.build.directory}/tmp</test.tmp.dir>
+    <test.build.dir>${project.build.directory}</test.build.dir>
     <test.tmp.dir.uri>file://${test.tmp.dir}</test.tmp.dir.uri>
     <test.warehouse.dir>${project.build.directory}/warehouse</test.warehouse.dir>
     <test.warehouse.scheme>pfile://</test.warehouse.scheme>
@@ -1026,6 +1027,7 @@
             <test.data.dir>${basedir}/${hive.path.to.root}/data/files</test.data.dir>
             <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
             <test.tmp.dir.uri>${test.tmp.dir.uri}</test.tmp.dir.uri>
+            <test.build.dir>${test.build.dir}</test.build.dir>
             <test.dfs.mkdir>${test.dfs.mkdir}</test.dfs.mkdir>
             <test.output.overwrite>${test.output.overwrite}</test.output.overwrite>
             <test.warehouse.dir>${test.warehouse.scheme}${test.warehouse.dir}</test.warehouse.dir>

http://git-wip-us.apache.org/repos/asf/hive/blob/976e628f/ql/src/test/templates/TestCliDriver.vm
----------------------------------------------------------------------
diff --git a/ql/src/test/templates/TestCliDriver.vm b/ql/src/test/templates/TestCliDriver.vm
index 72cfab9..1961c75 100644
--- a/ql/src/test/templates/TestCliDriver.vm
+++ b/ql/src/test/templates/TestCliDriver.vm
@@ -17,23 +17,34 @@
  */
 package org.apache.hadoop.hive.cli;
 
+import com.google.common.base.Stopwatch;
 import org.apache.hadoop.hive.ql.QTestUtil;
 import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.util.concurrent.TimeUnit;
+
 public class $className {
 
+  private static final Logger LOG = LoggerFactory.getLogger(${className}.class);
+
   private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root"));
   private static QTestUtil qt;
 
   static {
 
+    Stopwatch stopwatch = new Stopwatch().start();
+    String message = "Starting TestCliDriver run at " + System.currentTimeMillis();
+    LOG.info(message);
+    System.err.println(message);
     MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode");
     String hiveConfDir = "$hiveConfDir";
     String initScript = "$initScript";
@@ -44,15 +55,29 @@ public class $className {
       if (!hiveConfDir.isEmpty()) {
         hiveConfDir = HIVE_ROOT + hiveConfDir;
       }
+      // TODO Is ZK startup required for TestCliDriver?
+      // TODO Is enabling LlapIo required for TestCliDriver?
       qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR,
-      hiveConfDir, hadoopVer, initScript, cleanupScript, useHBaseMetastore, true);
+      hiveConfDir, hadoopVer, initScript, cleanupScript, useHBaseMetastore, true, "$className");
+      message = "QTestUtil instance created. ElapsedTimeSinceStart=" + stopwatch.elapsed(
+          TimeUnit.MILLISECONDS);
+      LOG.info(message);
+      System.err.println(message);
 
       // do a one time initialization
       qt.cleanUp();
+      message = "Initialization cleanup done. ElapsedTimeSinceStart=" + stopwatch.elapsed(TimeUnit.MILLISECONDS);
+      LOG.info(message);
+      System.err.println(message);
+
       qt.createSources();
+      message = "Initialization createSources done. ElapsedTimeSinceStart=" + stopwatch.elapsed(TimeUnit.MILLISECONDS);
+      LOG.info(message);
+      System.err.println(message);
 
     } catch (Exception e) {
-      System.err.println("Exception: " + e.getMessage());
+      System.err.println("Exception: " + e.getMessage() + ". ElapsedTimeSinceStart="
+          + stopwatch.elapsed(TimeUnit.MILLISECONDS));
       e.printStackTrace();
       System.err.flush();
       fail("Unexpected exception in static initialization: "+e.getMessage());
@@ -62,6 +87,7 @@ public class $className {
   @Before
   public void setUp() {
     try {
+      // TODO This restarts ZK for each test. Is that required?
       qt.clearTestSideEffects();
     } catch (Exception e) {
       System.err.println("Exception: " + e.getMessage());
@@ -113,7 +139,9 @@ public class $className {
   private void runTest(String tname, String fname, String fpath) throws Exception {
     long startTime = System.currentTimeMillis();
     try {
-      System.err.println("Begin query: " + fname);
+      String message = "Begin query: " + fname + ", startTime=" + startTime;
+      System.err.println(message);
+      LOG.info(message);
 
       qt.addFile(fpath);
 
@@ -136,7 +164,9 @@ public class $className {
     }
 
     long elapsedTime = System.currentTimeMillis() - startTime;
-    System.err.println("Done query: " + fname + " elapsedTime=" + elapsedTime/1000 + "s");
+    String message = "Done query: " + fname + " elapsedTime=" + elapsedTime/1000 + "s";
+    System.err.println(message);
+    LOG.info(message);
     assertTrue("Test passed", true);
   }
 }


[36/58] [abbrv] hive git commit: HIVE-13340 : Vectorization: from_unixtime UDF shim (Gopal V via Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13340 : Vectorization: from_unixtime UDF shim (Gopal V via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7049f49d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7049f49d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7049f49d

Branch: refs/heads/llap
Commit: 7049f49d9574587b2eb5896bab8415d7cd7c1ef1
Parents: e7f69f0
Author: Gopal V <go...@apache.org>
Authored: Wed Mar 23 02:07:00 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Apr 13 12:00:07 2016 -0700

----------------------------------------------------------------------
 .../ql/exec/vector/VectorizationContext.java    |   2 +
 .../optimizer/ConstantPropagateProcFactory.java |  22 ++-
 .../hive/ql/optimizer/physical/Vectorizer.java  |   2 +
 ql/src/test/queries/clientpositive/foldts.q     |  20 +++
 ql/src/test/results/clientpositive/foldts.q.out | 154 +++++++++++++++++++
 .../clientpositive/udf_to_unix_timestamp.q.out  |   2 +-
 6 files changed, 197 insertions(+), 5 deletions(-)
----------------------------------------------------------------------
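
A minimal HiveQL sketch of the behaviour this commit enables, assuming the
standard alltypesorc q-test table (illustrative only; the new foldts.q test
below exercises the same pattern):

  -- constant propagation now rewrites unix_timestamp(col) into
  -- to_unix_timestamp(col), and from_unixtime over an int-family argument
  -- is shimmed through the bridge, so the whole expression vectorizes
  set hive.vectorized.execution.enabled=true;
  explain
  select from_unixtime(unix_timestamp(ctimestamp1), 'EEEE')
  from alltypesorc limit 1;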


http://git-wip-us.apache.org/repos/asf/hive/blob/7049f49d/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 329c1d5..86025ef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -102,6 +102,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.udf.SettableUDF;
 import org.apache.hadoop.hive.ql.udf.UDFConv;
+import org.apache.hadoop.hive.ql.udf.UDFFromUnixTime;
 import org.apache.hadoop.hive.ql.udf.UDFHex;
 import org.apache.hadoop.hive.ql.udf.UDFRegExpExtract;
 import org.apache.hadoop.hive.ql.udf.UDFRegExpReplace;
@@ -761,6 +762,7 @@ public class VectorizationContext {
           || udfClass.equals(UDFRegExpExtract.class)
           || udfClass.equals(UDFRegExpReplace.class)
           || udfClass.equals(UDFConv.class)
+          || udfClass.equals(UDFFromUnixTime.class) && isIntFamily(arg0Type(expr))
           || isCastToIntFamily(udfClass) && isStringFamily(arg0Type(expr))
           || isCastToFloatFamily(udfClass) && isStringFamily(arg0Type(expr))
           || udfClass.equals(UDFToString.class) &&

http://git-wip-us.apache.org/repos/asf/hive/blob/7049f49d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index bdc7448..8c1f34d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -77,6 +77,8 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUnixTimeStamp;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUnixTimeStamp;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFWhen;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
@@ -229,7 +231,7 @@ public final class ConstantPropagateProcFactory {
   public static ExprNodeDesc foldExpr(ExprNodeGenericFuncDesc funcDesc) {
 
     GenericUDF udf = funcDesc.getGenericUDF();
-    if (!isDeterministicUdf(udf)) {
+    if (!isDeterministicUdf(udf, funcDesc.getChildren())) {
       return funcDesc;
     }
     return evaluateFunction(funcDesc.getGenericUDF(),funcDesc.getChildren(), funcDesc.getChildren());
@@ -347,7 +349,7 @@ public final class ConstantPropagateProcFactory {
       }
 
       // Don't evaluate nondeterministic function since the value can only calculate during runtime.
-      if (!isDeterministicUdf(udf)) {
+      if (!isDeterministicUdf(udf, newExprs)) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Function " + udf.getClass() + " is undeterministic. Don't evalulate immediately.");
         }
@@ -406,7 +408,7 @@ public final class ConstantPropagateProcFactory {
       }
 
       // Don't evaluate nondeterministic function since the value can only calculate during runtime.
-      if (!isDeterministicUdf(udf)) {
+      if (!isDeterministicUdf(udf, newExprs)) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Function " + udf.getClass() + " is undeterministic. Don't evaluate immediately.");
         }
@@ -457,12 +459,17 @@ public final class ConstantPropagateProcFactory {
     return desc;
   }
 
-  private static boolean isDeterministicUdf(GenericUDF udf) {
+  private static boolean isDeterministicUdf(GenericUDF udf,  List<ExprNodeDesc> children) {
     UDFType udfType = udf.getClass().getAnnotation(UDFType.class);
     if (udf instanceof GenericUDFBridge) {
       udfType = ((GenericUDFBridge) udf).getUdfClass().getAnnotation(UDFType.class);
     }
     if (udfType.deterministic() == false) {
+      if (udf.getClass().equals(GenericUDFUnixTimeStamp.class) 
+          && children != null && children.size() > 0) {
+        // unix_timestamp is polymorphic (ignore class annotations)
+        return true;
+      }
       return false;
     }
 
@@ -817,6 +824,13 @@ public final class ConstantPropagateProcFactory {
       }
     }
 
+    if (udf instanceof GenericUDFUnixTimeStamp) {
+      if (newExprs.size() >= 1) {
+        // unix_timestamp(args) -> to_unix_timestamp(args)
+        return ExprNodeGenericFuncDesc.newInstance(new GenericUDFToUnixTimeStamp(), newExprs);
+      }
+    }
+
     return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/7049f49d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index d806b97..1ddd9be 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -126,6 +126,7 @@ import org.apache.hadoop.hive.ql.udf.UDFCos;
 import org.apache.hadoop.hive.ql.udf.UDFDayOfMonth;
 import org.apache.hadoop.hive.ql.udf.UDFDegrees;
 import org.apache.hadoop.hive.ql.udf.UDFExp;
+import org.apache.hadoop.hive.ql.udf.UDFFromUnixTime;
 import org.apache.hadoop.hive.ql.udf.UDFHex;
 import org.apache.hadoop.hive.ql.udf.UDFHour;
 import org.apache.hadoop.hive.ql.udf.UDFLength;
@@ -247,6 +248,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     supportedGenericUDFs.add(UDFSecond.class);
     supportedGenericUDFs.add(UDFWeekOfYear.class);
     supportedGenericUDFs.add(GenericUDFToUnixTimeStamp.class);
+    supportedGenericUDFs.add(UDFFromUnixTime.class);
 
     supportedGenericUDFs.add(GenericUDFDateAdd.class);
     supportedGenericUDFs.add(GenericUDFDateSub.class);

http://git-wip-us.apache.org/repos/asf/hive/blob/7049f49d/ql/src/test/queries/clientpositive/foldts.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/foldts.q b/ql/src/test/queries/clientpositive/foldts.q
new file mode 100644
index 0000000..362cac2
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/foldts.q
@@ -0,0 +1,20 @@
+
+set hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+
+explain
+select ctimestamp1, unix_timestamp(ctimestamp1), to_unix_timestamp(ctimestamp1) from alltypesorc limit 1;
+
+select ctimestamp1, unix_timestamp(ctimestamp1), to_unix_timestamp(ctimestamp1) from alltypesorc limit 1;
+
+create temporary table src1orc stored as orc as select * from src1;
+
+explain
+select from_unixtime(to_unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1; 
+
+select from_unixtime(to_unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1; 
+
+explain
+select from_unixtime(unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1; 
+
+select from_unixtime(unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1; 

http://git-wip-us.apache.org/repos/asf/hive/blob/7049f49d/ql/src/test/results/clientpositive/foldts.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/foldts.q.out b/ql/src/test/results/clientpositive/foldts.q.out
new file mode 100644
index 0000000..4c78495
--- /dev/null
+++ b/ql/src/test/results/clientpositive/foldts.q.out
@@ -0,0 +1,154 @@
+PREHOOK: query: explain
+select ctimestamp1, unix_timestamp(ctimestamp1), to_unix_timestamp(ctimestamp1) from alltypesorc limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select ctimestamp1, unix_timestamp(ctimestamp1), to_unix_timestamp(ctimestamp1) from alltypesorc limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: ctimestamp1 (type: timestamp), to_unix_timestamp(ctimestamp1) (type: bigint), to_unix_timestamp(ctimestamp1) (type: bigint)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select ctimestamp1, unix_timestamp(ctimestamp1), to_unix_timestamp(ctimestamp1) from alltypesorc limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select ctimestamp1, unix_timestamp(ctimestamp1), to_unix_timestamp(ctimestamp1) from alltypesorc limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+1969-12-31 15:59:46.674	-13	-13
+PREHOOK: query: create temporary table src1orc stored as orc as select * from src1
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src1orc
+POSTHOOK: query: create temporary table src1orc stored as orc as select * from src1
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src1orc
+PREHOOK: query: explain
+select from_unixtime(to_unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select from_unixtime(to_unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: from_unixtime(to_unix_timestamp(ctimestamp1), 'EEEE') (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select from_unixtime(to_unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select from_unixtime(to_unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+Wednesday
+PREHOOK: query: explain
+select from_unixtime(unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select from_unixtime(unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: from_unixtime(to_unix_timestamp(ctimestamp1), 'EEEE') (type: string)
+              outputColumnNames: _col0
+              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 215 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select from_unixtime(unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select from_unixtime(unix_timestamp(ctimestamp1), 'EEEE') from alltypesorc limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+Wednesday

http://git-wip-us.apache.org/repos/asf/hive/blob/7049f49d/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out b/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out
index ce82461..3d31664 100644
--- a/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out
@@ -103,7 +103,7 @@ STAGE PLANS:
           alias: src
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: (unix_timestamp(key) > 10) (type: boolean)
+            predicate: (to_unix_timestamp(key) > 10) (type: boolean)
             Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)


[43/58] [abbrv] hive git commit: HIVE-13514: TestClearDanglingScratchDir fail on branch-1 (Daniel Dai, reviewed by Thejas Nair)

Posted by jd...@apache.org.
HIVE-13514: TestClearDanglingScratchDir fail on branch-1 (Daniel Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/940fc7df
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/940fc7df
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/940fc7df

Branch: refs/heads/llap
Commit: 940fc7dfee075f6d71475a49b10cc508ef90c194
Parents: 976e628
Author: Daniel Dai <da...@hortonworks.com>
Authored: Thu Apr 14 10:47:03 2016 -0700
Committer: Daniel Dai <da...@hortonworks.com>
Committed: Thu Apr 14 10:47:03 2016 -0700

----------------------------------------------------------------------
 .../ql/session/TestClearDanglingScratchDir.java | 90 +++++++-------------
 1 file changed, 30 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/940fc7df/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
index 1007113..3cb80a7 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java
@@ -18,15 +18,9 @@
 package org.apache.hadoop.hive.ql.session;
 
 import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
 import java.io.PrintStream;
-import java.io.PrintWriter;
-import java.nio.channels.FileChannel;
 import java.util.UUID;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -45,36 +39,12 @@ public class TestClearDanglingScratchDir {
   private static HiveConf conf;
   private static Path scratchDir;
   private ByteArrayOutputStream stdout;
+  private ByteArrayOutputStream stderr;
   private PrintStream origStdoutPs;
-  private static File logFile;
+  private PrintStream origStderrPs;
 
   @BeforeClass
   static public void oneTimeSetup() throws Exception {
-    logFile = File.createTempFile("log", "");
-    File log4jConfig = File.createTempFile("config", ".properties");
-    log4jConfig.deleteOnExit();
-    PrintWriter pw = new PrintWriter(log4jConfig);
-    pw.println("appenders = console, file");
-    pw.println("appender.console.type = Console");
-    pw.println("appender.console.name = STDOUT");
-    pw.println("appender.console.layout.type = PatternLayout");
-    pw.println("appender.console.layout.pattern = %t %-5p %c{2} - %m%n");
-    pw.println("appender.file.type = File");
-    pw.println("appender.file.name = LOGFILE");
-    pw.println("appender.file.fileName = " + logFile.getAbsolutePath());
-    pw.println("appender.file.layout.type = PatternLayout");
-    pw.println("appender.file.layout.pattern = %t %-5p %c{2} - %m%n");
-    pw.println("rootLogger.level = debug");
-    pw.println("rootLogger.appenderRefs = stdout");
-    pw.println("rootLogger.appenderRef.stdout.ref = STDOUT");
-    pw.println("loggers = file");
-    pw.println("logger.file.name = SessionState");
-    pw.println("logger.file.level = debug");
-    pw.println("logger.file.appenderRefs = file");
-    pw.println("logger.file.appenderRef.file.ref = LOGFILE");
-    pw.close();
-    System.setProperty("log4j.configurationFile", log4jConfig.getAbsolutePath());
-
     m_dfs = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).format(true).build();
     conf = new HiveConf();
     conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true");
@@ -95,67 +65,67 @@ public class TestClearDanglingScratchDir {
     m_dfs.shutdown();
   }
 
-  public void redirectOutput() throws IOException {
+  public void redirectStdOutErr() {
     stdout = new ByteArrayOutputStream();
     PrintStream psStdout = new PrintStream(stdout);
     origStdoutPs = System.out;
     System.setOut(psStdout);
 
-    FileOutputStream fos = new FileOutputStream(logFile, true);
-    FileChannel outChan = fos.getChannel();
-    outChan.truncate(0);
-    outChan.close();
-    fos.close();
+    stderr = new ByteArrayOutputStream();
+    PrintStream psStderr = new PrintStream(stderr);
+    origStderrPs = System.err;
+    System.setErr(psStderr);
   }
 
-  public void rollbackOutput() {
+  public void rollbackStdOutErr() {
     System.setOut(origStdoutPs);
+    System.setErr(origStderrPs);
   }
 
   @Test
   public void testClearDanglingScratchDir() throws Exception {
 
     // No scratch dir initially
-    redirectOutput();
-    ClearDanglingScratchDir.main(new String[]{"-s",
+    redirectStdOutErr();
+    ClearDanglingScratchDir.main(new String[]{"-v", "-s",
         m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
-    rollbackOutput();
-    Assert.assertTrue(FileUtils.readFileToString(logFile).contains("Cannot find any scratch directory to clear"));
+    rollbackStdOutErr();
+    Assert.assertTrue(stderr.toString().contains("Cannot find any scratch directory to clear"));
 
     // Create scratch dir without lock files
     m_dfs.getFileSystem().mkdirs(new Path(new Path(scratchDir, "dummy"), UUID.randomUUID().toString()));
-    redirectOutput();
-    ClearDanglingScratchDir.main(new String[]{"-s",
+    redirectStdOutErr();
+    ClearDanglingScratchDir.main(new String[]{"-v", "-s",
         m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
-    rollbackOutput();
-    Assert.assertEquals(StringUtils.countMatches(FileUtils.readFileToString(logFile),
+    rollbackStdOutErr();
+    Assert.assertEquals(StringUtils.countMatches(stderr.toString(),
         "since it does not contain " + SessionState.LOCK_FILE_NAME), 1);
-    Assert.assertTrue(FileUtils.readFileToString(logFile).contains("Cannot find any scratch directory to clear"));
+    Assert.assertTrue(stderr.toString().contains("Cannot find any scratch directory to clear"));
 
     // One live session
     SessionState ss = SessionState.start(conf);
-    redirectOutput();
-    ClearDanglingScratchDir.main(new String[]{"-s",
+    redirectStdOutErr();
+    ClearDanglingScratchDir.main(new String[]{"-v", "-s",
         m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
-    rollbackOutput();
-    Assert.assertEquals(StringUtils.countMatches(FileUtils.readFileToString(logFile), "is being used by live process"), 1);
+    rollbackStdOutErr();
+    Assert.assertEquals(StringUtils.countMatches(stderr.toString(), "is being used by live process"), 1);
 
     // One dead session with dry-run
     ss.releaseSessionLockFile();
-    redirectOutput();
-    ClearDanglingScratchDir.main(new String[]{"-r", "-s",
+    redirectStdOutErr();
+    ClearDanglingScratchDir.main(new String[]{"-r", "-v", "-s",
         m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
-    rollbackOutput();
+    rollbackStdOutErr();
     // Find one session dir to remove
     Assert.assertFalse(stdout.toString().isEmpty());
 
     // Remove the dead session dir
-    redirectOutput();
-    ClearDanglingScratchDir.main(new String[]{"-s",
+    redirectStdOutErr();
+    ClearDanglingScratchDir.main(new String[]{"-v", "-s",
         m_dfs.getFileSystem().getUri().toString() + scratchDir.toUri().toString()});
-    rollbackOutput();
-    Assert.assertTrue(FileUtils.readFileToString(logFile).contains("Removing 1 scratch directories"));
-    Assert.assertEquals(StringUtils.countMatches(FileUtils.readFileToString(logFile), "removed"), 1);
+    rollbackStdOutErr();
+    Assert.assertTrue(stderr.toString().contains("Removing 1 scratch directories"));
+    Assert.assertEquals(StringUtils.countMatches(stderr.toString(), "removed"), 1);
     ss.close();
   }
 }


[05/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
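
An illustrative sketch of the plan change reflected in the golden files below,
assuming the standard src q-test table (the query is only in the spirit of the
ppd_outer_join tests, not copied from them):

  -- pushed-down conjuncts on a join source are now merged into one flat
  -- multi-AND instead of nested binary ANDs
  select a.key, a.value
  from src a join src b on (a.key = b.key)
  where a.key > '10' and a.key < '20' and b.key > '15' and b.key < '25';
  -- before: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25'))
  -- after:  ((key > '10') and (key < '20') and (key > '15') and (key < '25'))
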
http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
index c6a5303..6cd74c3 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -55,7 +55,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) (type: boolean)
+                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -272,7 +272,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -290,7 +290,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) (type: boolean)
+                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
index 2ecf671..2d70062 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -55,7 +55,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) (type: boolean)
+                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -272,7 +272,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -290,7 +290,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) (type: boolean)
+                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
index d4ba2a0..312b3bd 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
@@ -43,7 +43,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((sqrt(key) <> 13.0) and (key > '10')) and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+                    predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -60,7 +60,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) and (sqrt(key) <> 13.0)) (type: boolean)
+                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -78,7 +78,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) and (sqrt(key) <> 13.0)) (type: boolean)
+                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -417,7 +417,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((sqrt(key) <> 13.0) and (key > '10')) and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+                    predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -434,7 +434,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) and (sqrt(key) <> 13.0)) (type: boolean)
+                    predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -452,7 +452,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) and (sqrt(key) <> 13.0)) (type: boolean)
+                    predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
index 853c454..4efa206 100644
--- a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out
@@ -1394,7 +1394,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -1515,7 +1515,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/sample8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sample8.q.out b/ql/src/test/results/clientpositive/spark/sample8.q.out
index 1734361..ae145b2 100644
--- a/ql/src/test/results/clientpositive/spark/sample8.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample8.q.out
@@ -103,7 +103,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: true
-                    predicate: (((((hash(key) & 2147483647) % 10) = 0) and value is not null) and (((hash(key) & 2147483647) % 1) = 0)) (type: boolean)
+                    predicate: ((((hash(key) & 2147483647) % 10) = 0) and value is not null and (((hash(key) & 2147483647) % 1) = 0)) (type: boolean)
                     Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)
@@ -172,7 +172,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: true
-                    predicate: (((((hash(key) & 2147483647) % 1) = 0) and value is not null) and (((hash(key) & 2147483647) % 10) = 0)) (type: boolean)
+                    predicate: ((((hash(key) & 2147483647) % 1) = 0) and value is not null and (((hash(key) & 2147483647) % 10) = 0)) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/semijoin.q.out b/ql/src/test/results/clientpositive/spark/semijoin.q.out
index b1dd351..085257e 100644
--- a/ql/src/test/results/clientpositive/spark/semijoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/semijoin.q.out
@@ -761,7 +761,7 @@ STAGE PLANS:
                   alias: t2
                   Statistics: Num rows: 11 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+                    predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out
index c329883..921c4ba 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out
@@ -66,7 +66,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13')))) (type: boolean)
+                    predicate: (key is not null and val is not null and ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -83,7 +83,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13')))) (type: boolean)
+                    predicate: (key is not null and val is not null and ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -100,7 +100,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13'))))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13'))))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -117,7 +117,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13'))))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13'))))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out
index 70007d9..36b7306 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out
@@ -97,7 +97,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (key = '2')) (type: boolean)
+                    predicate: (key is not null and val is not null and (key = '2')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -151,7 +151,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not (key = '2'))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not (key = '2'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out
index a1b0b00..62540cc 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out
@@ -66,7 +66,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (((key = '2') and (val = '12')) or (key = '3'))) (type: boolean)
+                    predicate: (key is not null and val is not null and (((key = '2') and (val = '12')) or (key = '3'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -83,7 +83,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (((key = '2') and (val = '12')) or (key = '3'))) (type: boolean)
+                    predicate: (key is not null and val is not null and (((key = '2') and (val = '12')) or (key = '3'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -100,7 +100,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not (((key = '2') and (val = '12')) or (key = '3')))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not (((key = '2') and (val = '12')) or (key = '3')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -117,7 +117,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not (((key = '2') and (val = '12')) or (key = '3')))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not (((key = '2') and (val = '12')) or (key = '3')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out
index 928d394..ca33d86 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out
@@ -273,7 +273,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (((key = '2') and (val = '12')) or (key = '2'))) (type: boolean)
+                    predicate: (key is not null and val is not null and (((key = '2') and (val = '12')) or (key = '2'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -290,7 +290,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (((key = '2') and (val = '12')) or (key = '2'))) (type: boolean)
+                    predicate: (key is not null and val is not null and (((key = '2') and (val = '12')) or (key = '2'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -307,7 +307,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not (((key = '2') and (val = '12')) or (key = '2')))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not (((key = '2') and (val = '12')) or (key = '2')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -324,7 +324,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not (((key = '2') and (val = '12')) or (key = '2')))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not (((key = '2') and (val = '12')) or (key = '2')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out
index f8f633d..8c255d4 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
+                    predicate: (key is not null and val is not null and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -91,7 +91,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
+                    predicate: (key is not null and val is not null and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -108,7 +108,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -125,7 +125,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -359,7 +359,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
+                    predicate: (key is not null and val is not null and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -376,7 +376,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
+                    predicate: (key is not null and val is not null and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -393,7 +393,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
@@ -410,7 +410,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
+                    predicate: (key is not null and val is not null and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out
index 876d996..098cc59 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out
@@ -87,7 +87,7 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 3 Data size: 414 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((userid is not null and pageid is not null) and postid is not null) and type is not null) (type: boolean)
+                    predicate: (userid is not null and pageid is not null and postid is not null and type is not null) (type: boolean)
                     Statistics: Num rows: 3 Data size: 414 Basic stats: COMPLETE Column stats: NONE
                     Sorted Merge Bucket Map Join Operator
                       condition map:

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
index 20babcc..c13bb4f 100644
--- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out
@@ -636,7 +636,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
+                    predicate: ((key < 8) and (key < 6)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_2.q.out b/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_2.q.out
index 6f83401..72fccc4 100644
--- a/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_2.q.out
@@ -79,15 +79,15 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and value is not null) and (key < 10)) (type: boolean)
-                    Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (value is not null and (key < 10)) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Sorted Merge Bucket Map Join Operator
                       condition map:
                            Inner Join 0 to 1
                       keys:
                         0 key (type: string), value (type: string)
                         1 key (type: string), value (type: string)
-                      Statistics: Num rows: 45 Data size: 478 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash
@@ -104,17 +104,13 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: bigint)
-                  outputColumnNames: _col0
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_3.q.out b/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_3.q.out
index d705d9a..7be0a38 100644
--- a/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_3.q.out
@@ -79,15 +79,15 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and value is not null) and (key < 10)) (type: boolean)
-                    Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (value is not null and (key < 10)) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Sorted Merge Bucket Map Join Operator
                       condition map:
                            Inner Join 0 to 1
                       keys:
                         0 key (type: string), value (type: string)
                         1 key (type: string), value (type: string)
-                      Statistics: Num rows: 45 Data size: 478 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash
@@ -104,17 +104,13 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: bigint)
-                  outputColumnNames: _col0
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_4.q.out b/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_4.q.out
index 0a70e59..f0885f1 100644
--- a/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_4.q.out
+++ b/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_4.q.out
@@ -76,8 +76,8 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and value is not null) and (key < 10)) (type: boolean)
-                    Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (value is not null and (key < 10)) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
                       keys:
                         0 key (type: string), value (type: string)
@@ -97,8 +97,8 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and value is not null) and (key < 10)) (type: boolean)
-                    Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (value is not null and (key < 10)) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
                            Inner Join 0 to 1
@@ -107,7 +107,7 @@ STAGE PLANS:
                         1 key (type: string), value (type: string)
                       input vertices:
                         1 Map 3
-                      Statistics: Num rows: 45 Data size: 478 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash
@@ -126,17 +126,13 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: bigint)
-                  outputColumnNames: _col0
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_8.q.out b/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_8.q.out
index 4614d9c..09f1dea 100644
--- a/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_8.q.out
+++ b/ql/src/test/results/clientpositive/spark/sort_merge_join_desc_8.q.out
@@ -149,17 +149,13 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: bigint)
-                  outputColumnNames: _col0
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -208,8 +204,8 @@ STAGE PLANS:
                   alias: b
                   Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and value2 is not null) and (key < 10)) (type: boolean)
-                    Statistics: Num rows: 41 Data size: 517 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (value2 is not null and (key < 10)) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 2095 Basic stats: COMPLETE Column stats: NONE
                     Spark HashTable Sink Operator
                       keys:
                         0 key (type: string), value2 (type: string)
@@ -229,8 +225,8 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 10218 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and value2 is not null) and (key < 10)) (type: boolean)
-                    Statistics: Num rows: 41 Data size: 837 Basic stats: COMPLETE Column stats: NONE
+                    predicate: (value2 is not null and (key < 10)) (type: boolean)
+                    Statistics: Num rows: 166 Data size: 3392 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
                            Inner Join 0 to 1
@@ -239,7 +235,7 @@ STAGE PLANS:
                         1 key (type: string), value2 (type: string)
                       input vertices:
                         1 Map 3
-                      Statistics: Num rows: 45 Data size: 920 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 182 Data size: 3731 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash
@@ -258,17 +254,13 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: bigint)
-                  outputColumnNames: _col0
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator


[18/58] [abbrv] hive git commit: HIVE-13430: Pass error message to failure hook (Jimmy, reviewed by Szehon)

Posted by jd...@apache.org.
HIVE-13430: Pass error message to failure hook (Jimmy, reviewed by Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1cb4ce87
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1cb4ce87
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1cb4ce87

Branch: refs/heads/llap
Commit: 1cb4ce8714d216af6e1a9f6496392d07130dffbe
Parents: 42fa60a
Author: Jimmy Xiang <jx...@apache.org>
Authored: Tue Apr 5 10:06:12 2016 -0700
Committer: Jimmy Xiang <jx...@apache.org>
Committed: Sun Apr 10 20:14:11 2016 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/Driver.java    | 15 ++++++++-------
 .../org/apache/hadoop/hive/ql/exec/MoveTask.java     |  1 +
 .../org/apache/hadoop/hive/ql/hooks/HookContext.java | 11 ++++++++++-
 3 files changed, 19 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1cb4ce87/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 65ed1db..abf94ff 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -36,13 +36,7 @@ import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
 
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Sets;
-
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -118,7 +112,13 @@ import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hive.common.util.ShutdownHookManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Sets;
 
 public class Driver implements CommandProcessor {
 
@@ -1629,7 +1629,9 @@ public class Driver implements CommandProcessor {
             continue;
 
           } else {
+            setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
             hookContext.setHookType(HookContext.HookType.ON_FAILURE_HOOK);
+            hookContext.setErrorMessage(errorMessage);
             // Get all the failure execution hooks and execute them.
             for (Hook ofh : getHooks(HiveConf.ConfVars.ONFAILUREHOOKS)) {
               perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
@@ -1638,7 +1640,6 @@ public class Driver implements CommandProcessor {
 
               perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.FAILURE_HOOK + ofh.getClass().getName());
             }
-            setErrorMsgAndDetail(exitVal, result.getTaskError(), tsk);
             SQLState = "08S01";
             console.printError(errorMessage);
             driverCxt.shutdown();

http://git-wip-us.apache.org/repos/asf/hive/blob/1cb4ce87/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 4a546d1..54592cc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -527,6 +527,7 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
     } catch (Exception e) {
       console.printError("Failed with exception " + e.getMessage(), "\n"
           + StringUtils.stringifyException(e));
+      setException(e);
       return (1);
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/1cb4ce87/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
index bed17e9..6fd1f66 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java
@@ -51,6 +51,7 @@ public class HookContext {
   private Index depMap;
   private UserGroupInformation ugi;
   private HookType hookType;
+  private String errorMessage;
   final private Map<String, ContentSummary> inputPathToContentSummary;
   private final String ipAddress;
   private final String userName;
@@ -161,7 +162,15 @@ public class HookContext {
 
   public String getIpAddress() {
     return this.ipAddress;
- }
+  }
+
+  public void setErrorMessage(String errorMessage) {
+    this.errorMessage = errorMessage;
+  }
+
+  public String getErrorMessage() {
+    return errorMessage;
+  }
 
   public String getOperationName() {
     return queryPlan.getOperationName();
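
For readers following this change, here is a minimal sketch (not part of the commit) of how an ON_FAILURE hook could consume the error message that Driver now places on HookContext before running the failure hooks. The class name and package are hypothetical, and the assumption that such a hook is registered through the hive.exec.failure.hooks property is mine; the ExecuteWithHookContext interface and the getErrorMessage()/getOperationName() accessors are the ones shown in the diff above.

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;

// Hypothetical failure hook illustrating the accessor added by HIVE-13430.
// Assumed to be registered via the hive.exec.failure.hooks configuration property.
public class LoggingFailureHook implements ExecuteWithHookContext {
  @Override
  public void run(HookContext hookContext) throws Exception {
    // Driver now calls setErrorMsgAndDetail(...) and setErrorMessage(...) before
    // invoking failure hooks, so the message is available here, not only on the console.
    String error = hookContext.getErrorMessage();
    System.err.println("Operation " + hookContext.getOperationName()
        + " failed: " + (error == null ? "<no error message set>" : error));
  }
}
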


[32/58] [abbrv] hive git commit: HIVE-13499 - TestJdbcWithMiniHS2.testConcurrentStatements is hanging - Temp patch to disable test

Posted by jd...@apache.org.
HIVE-13499 - TestJdbcWithMiniHS2.testConcurrentStatements is hanging - Temp patch to disable test


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8c182ae1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8c182ae1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8c182ae1

Branch: refs/heads/llap
Commit: 8c182ae10bc03be98ad38defde7518eeadfe9e1b
Parents: ddab69c
Author: Thejas Nair <th...@hortonworks.com>
Authored: Wed Apr 13 00:53:25 2016 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Wed Apr 13 00:53:25 2016 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java    | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8c182ae1/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 10c8ff2..9e3c7e1 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -64,6 +64,7 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 
 public class TestJdbcWithMiniHS2 {
@@ -130,6 +131,7 @@ public class TestJdbcWithMiniHS2 {
     stmt.close();
   }
 
+  @Ignore("Disabling test until hanging issue is resolved.")
   @Test
   public void testConcurrentStatements() throws Exception {
     String tableName = "testConcurrentStatements";


[38/58] [abbrv] hive git commit: HIVE-12159: Create vectorized readers for the complex types (Owen O'Malley, reviewed by Matt McCline)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java
index adb52f0..a52b3ef 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedORCReader.java
@@ -151,12 +151,11 @@ public class TestVectorizedORCReader {
         OrcFile.readerOptions(conf));
     RecordReaderImpl vrr = (RecordReaderImpl) vreader.rows();
     RecordReaderImpl rr = (RecordReaderImpl) reader.rows();
-    VectorizedRowBatch batch = null;
+    VectorizedRowBatch batch = reader.getSchema().createRowBatch();
     OrcStruct row = null;
 
     // Check Vectorized ORC reader against ORC row reader
-    while (vrr.hasNext()) {
-      batch = vrr.nextBatch(batch);
+    while (vrr.nextBatch(batch)) {
       for (int i = 0; i < batch.size; i++) {
         row = (OrcStruct) rr.next(row);
         for (int j = 0; j < batch.cols.length; j++) {
@@ -239,6 +238,6 @@ public class TestVectorizedORCReader {
       Assert.assertEquals(false, batch.cols[8].noNulls);
       Assert.assertEquals(false, batch.cols[9].noNulls);
     }
-    Assert.assertEquals(false, rr.hasNext());
+    Assert.assertEquals(false, rr.nextBatch(batch));
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/BytesColumnVector.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/BytesColumnVector.java b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/BytesColumnVector.java
index 99744cd..f915a7e 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/BytesColumnVector.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/BytesColumnVector.java
@@ -338,6 +338,17 @@ public class BytesColumnVector extends ColumnVector {
     initBuffer(0);
   }
 
+  public String toString(int row) {
+    if (isRepeating) {
+      row = 0;
+    }
+    if (noNulls || !isNull[row]) {
+      return new String(vector[row], start[row], length[row]);
+    } else {
+      return null;
+    }
+  }
+
   @Override
   public void stringifyValue(StringBuilder buffer, int row) {
     if (isRepeating) {

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
index c0dd5ed..d971339 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
@@ -392,4 +392,4 @@ public class TimestampColumnVector extends ColumnVector {
       buffer.append("null");
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/0dd4621f/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/UnionColumnVector.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/UnionColumnVector.java b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/UnionColumnVector.java
index 298d588..0c61243 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/UnionColumnVector.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/UnionColumnVector.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.exec.vector;
 
-import java.util.Arrays;
-
 /**
  * The representation of a vectorized column of struct objects.
  *
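
For context, a minimal sketch of the batch-at-a-time read pattern the updated test above exercises: the caller creates the batch from the reader's schema, and nextBatch(batch) both fills the batch and reports end-of-data, replacing the old hasNext()/nextBatch(previous) pair. The standalone class, the file path argument, the assumption that column 0 holds strings, and the assumption that the RecordReader interface exposes the boolean nextBatch(VectorizedRowBatch) signature used by the test are all illustrative, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.hadoop.hive.ql.io.orc.RecordReader;

// Hypothetical standalone scan of an ORC file using the post-patch reader API.
public class VectorizedOrcScan {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Reader reader = OrcFile.createReader(new Path(args[0]),   // ORC file path (assumed argument)
        OrcFile.readerOptions(conf));
    // The caller now creates the batch from the file schema...
    VectorizedRowBatch batch = reader.getSchema().createRowBatch();
    RecordReader rows = reader.rows();
    // ...and nextBatch(batch) returns false at end of file, so no hasNext() loop is needed.
    while (rows.nextBatch(batch)) {
      BytesColumnVector col0 = (BytesColumnVector) batch.cols[0];  // assumes column 0 is a string column
      for (int r = 0; r < batch.size; r++) {
        // toString(row) is the new BytesColumnVector helper added above; it handles
        // repeating vectors and returns null for null entries.
        System.out.println(col0.toString(r));
      }
    }
    rows.close();
  }
}
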


[45/58] [abbrv] hive git commit: HIVE-13505. Skip running TestDummy where possibe during precommit builds. (Siddharth Seth, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13505. Skip running TestDummy where possibe during precommit builds. (Siddharth Seth, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a207923f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a207923f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a207923f

Branch: refs/heads/llap
Commit: a207923ff847b62209f97682c0e3e7a649ae131d
Parents: e3e43c6
Author: Siddharth Seth <ss...@apache.org>
Authored: Thu Apr 14 11:50:28 2016 -0700
Committer: Siddharth Seth <ss...@apache.org>
Committed: Thu Apr 14 11:50:28 2016 -0700

----------------------------------------------------------------------
 testutils/ptest2/src/main/resources/source-prep.vm | 2 --
 1 file changed, 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a207923f/testutils/ptest2/src/main/resources/source-prep.vm
----------------------------------------------------------------------
diff --git a/testutils/ptest2/src/main/resources/source-prep.vm b/testutils/ptest2/src/main/resources/source-prep.vm
index 97fb69c..9c83a14 100644
--- a/testutils/ptest2/src/main/resources/source-prep.vm
+++ b/testutils/ptest2/src/main/resources/source-prep.vm
@@ -97,10 +97,8 @@ cd $workingDir/
       done
     #end
     mvn -B clean install -DskipTests -Dmaven.repo.local=$workingDir/maven $mavenArgs $mavenBuildArgs
-    mvn -B test -Dmaven.repo.local=$workingDir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
     cd itests
     mvn -B clean install -DskipTests -Dmaven.repo.local=$workingDir/maven $mavenArgs $mavenBuildArgs
-    mvn -B test -Dmaven.repo.local=$workingDir/maven -Dtest=TestDummy $mavenArgs $mavenTestArgs
   elif [[ "${buildTool}" == "ant" ]]
   then
     ant $antArgs -Divy.default.ivy.user.dir=$workingDir/ivy \


[08/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query27.q.out b/ql/src/test/results/clientpositive/perf/query27.q.out
index 3a32d7b..865d62c 100644
--- a/ql/src/test/results/clientpositive/perf/query27.q.out
+++ b/ql/src/test/results/clientpositive/perf/query27.q.out
@@ -85,7 +85,7 @@ Stage-0
                                         Select Operator [SEL_2] (rows=1 width=0)
                                           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                                           Filter Operator [FIL_50] (rows=1 width=0)
-                                            predicate:(((ss_cdemo_sk is not null and ss_sold_date_sk is not null) and ss_store_sk is not null) and ss_item_sk is not null)
+                                            predicate:(ss_cdemo_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_item_sk is not null)
                                             TableScan [TS_0] (rows=1 width=0)
                                               default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_cdemo_sk","ss_store_sk","ss_quantity","ss_list_price","ss_sales_price","ss_coupon_amt"]
                                     <-Map 8 [SIMPLE_EDGE]
@@ -94,7 +94,7 @@ Stage-0
                                         Select Operator [SEL_5] (rows=2475 width=362)
                                           Output:["_col0"]
                                           Filter Operator [FIL_51] (rows=2475 width=362)
-                                            predicate:((((cd_gender = 'F') and (cd_marital_status = 'D')) and (cd_education_status = 'Unknown')) and cd_demo_sk is not null)
+                                            predicate:((cd_gender = 'F') and (cd_marital_status = 'D') and (cd_education_status = 'Unknown') and cd_demo_sk is not null)
                                             TableScan [TS_3] (rows=19800 width=362)
                                               default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query29.q.out b/ql/src/test/results/clientpositive/perf/query29.q.out
index 299f16e..0f4116a 100644
--- a/ql/src/test/results/clientpositive/perf/query29.q.out
+++ b/ql/src/test/results/clientpositive/perf/query29.q.out
@@ -88,7 +88,7 @@ Stage-0
                                         Select Operator [SEL_14] (rows=18262 width=1119)
                                           Output:["_col0"]
                                           Filter Operator [FIL_93] (rows=18262 width=1119)
-                                            predicate:((d_moy BETWEEN 2 AND 5 and (d_year = 2000)) and d_date_sk is not null)
+                                            predicate:(d_moy BETWEEN 2 AND 5 and (d_year = 2000) and d_date_sk is not null)
                                             TableScan [TS_12] (rows=73049 width=1119)
                                               default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                     <-Reducer 4 [SIMPLE_EDGE]
@@ -102,7 +102,7 @@ Stage-0
                                             Select Operator [SEL_11] (rows=18262 width=1119)
                                               Output:["_col0"]
                                               Filter Operator [FIL_92] (rows=18262 width=1119)
-                                                predicate:(((d_moy = 2) and (d_year = 2000)) and d_date_sk is not null)
+                                                predicate:((d_moy = 2) and (d_year = 2000) and d_date_sk is not null)
                                                 TableScan [TS_9] (rows=73049 width=1119)
                                                   default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                         <-Reducer 3 [SIMPLE_EDGE]
@@ -116,7 +116,7 @@ Stage-0
                                                 Select Operator [SEL_8] (rows=1 width=0)
                                                   Output:["_col0","_col1","_col2","_col3"]
                                                   Filter Operator [FIL_91] (rows=1 width=0)
-                                                    predicate:((cs_bill_customer_sk is not null and cs_item_sk is not null) and cs_sold_date_sk is not null)
+                                                    predicate:(cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                                     TableScan [TS_6] (rows=1 width=0)
                                                       default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity"]
                                             <-Reducer 2 [SIMPLE_EDGE]
@@ -130,7 +130,7 @@ Stage-0
                                                     Select Operator [SEL_2] (rows=1 width=0)
                                                       Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                                       Filter Operator [FIL_89] (rows=1 width=0)
-                                                        predicate:((((ss_item_sk is not null and ss_customer_sk is not null) and ss_ticket_number is not null) and ss_sold_date_sk is not null) and ss_store_sk is not null)
+                                                        predicate:(ss_item_sk is not null and ss_customer_sk is not null and ss_ticket_number is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
                                                         TableScan [TS_0] (rows=1 width=0)
                                                           default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_quantity"]
                                                 <-Map 11 [SIMPLE_EDGE]
@@ -139,7 +139,7 @@ Stage-0
                                                     Select Operator [SEL_5] (rows=1 width=0)
                                                       Output:["_col0","_col1","_col2","_col3","_col4"]
                                                       Filter Operator [FIL_90] (rows=1 width=0)
-                                                        predicate:(((sr_item_sk is not null and sr_customer_sk is not null) and sr_ticket_number is not null) and sr_returned_date_sk is not null)
+                                                        predicate:(sr_item_sk is not null and sr_customer_sk is not null and sr_ticket_number is not null and sr_returned_date_sk is not null)
                                                         TableScan [TS_3] (rows=1 width=0)
                                                           default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_return_quantity"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query31.q.out b/ql/src/test/results/clientpositive/perf/query31.q.out
index a3c47a5..3cb7079 100644
--- a/ql/src/test/results/clientpositive/perf/query31.q.out
+++ b/ql/src/test/results/clientpositive/perf/query31.q.out
@@ -88,7 +88,7 @@ Stage-0
                                       Select Operator [SEL_118] (rows=18262 width=1119)
                                         Output:["_col0"]
                                         Filter Operator [FIL_266] (rows=18262 width=1119)
-                                          predicate:(((d_qoy = 3) and (d_year = 1998)) and d_date_sk is not null)
+                                          predicate:((d_qoy = 3) and (d_year = 1998) and d_date_sk is not null)
                                           TableScan [TS_116] (rows=73049 width=1119)
                                             default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                 <-Reducer 5 [SIMPLE_EDGE]
@@ -145,7 +145,7 @@ Stage-0
                                               Select Operator [SEL_26] (rows=18262 width=1119)
                                                 Output:["_col0"]
                                                 Filter Operator [FIL_254] (rows=18262 width=1119)
-                                                  predicate:(((d_qoy = 2) and (d_year = 1998)) and d_date_sk is not null)
+                                                  predicate:((d_qoy = 2) and (d_year = 1998) and d_date_sk is not null)
                                                   TableScan [TS_24] (rows=73049 width=1119)
                                                     default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                         <-Reducer 19 [SIMPLE_EDGE]
@@ -193,7 +193,7 @@ Stage-0
                                               Select Operator [SEL_47] (rows=18262 width=1119)
                                                 Output:["_col0"]
                                                 Filter Operator [FIL_257] (rows=18262 width=1119)
-                                                  predicate:(((d_qoy = 3) and (d_year = 1998)) and d_date_sk is not null)
+                                                  predicate:((d_qoy = 3) and (d_year = 1998) and d_date_sk is not null)
                                                   TableScan [TS_45] (rows=73049 width=1119)
                                                     default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                         <-Reducer 25 [SIMPLE_EDGE]
@@ -241,7 +241,7 @@ Stage-0
                                               Select Operator [SEL_68] (rows=18262 width=1119)
                                                 Output:["_col0"]
                                                 Filter Operator [FIL_260] (rows=18262 width=1119)
-                                                  predicate:(((d_qoy = 1) and (d_year = 1998)) and d_date_sk is not null)
+                                                  predicate:((d_qoy = 1) and (d_year = 1998) and d_date_sk is not null)
                                                   TableScan [TS_66] (rows=73049 width=1119)
                                                     default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                         <-Reducer 31 [SIMPLE_EDGE]
@@ -289,7 +289,7 @@ Stage-0
                                               Select Operator [SEL_89] (rows=18262 width=1119)
                                                 Output:["_col0"]
                                                 Filter Operator [FIL_263] (rows=18262 width=1119)
-                                                  predicate:(((d_qoy = 2) and (d_year = 1998)) and d_date_sk is not null)
+                                                  predicate:((d_qoy = 2) and (d_year = 1998) and d_date_sk is not null)
                                                   TableScan [TS_87] (rows=73049 width=1119)
                                                     default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                         <-Reducer 4 [SIMPLE_EDGE]
@@ -337,7 +337,7 @@ Stage-0
                                               Select Operator [SEL_5] (rows=18262 width=1119)
                                                 Output:["_col0"]
                                                 Filter Operator [FIL_251] (rows=18262 width=1119)
-                                                  predicate:(((d_qoy = 1) and (d_year = 1998)) and d_date_sk is not null)
+                                                  predicate:((d_qoy = 1) and (d_year = 1998) and d_date_sk is not null)
                                                   TableScan [TS_3] (rows=73049 width=1119)
                                                     default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query34.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query34.q.out b/ql/src/test/results/clientpositive/perf/query34.q.out
index a08c3ff..0586813 100644
--- a/ql/src/test/results/clientpositive/perf/query34.q.out
+++ b/ql/src/test/results/clientpositive/perf/query34.q.out
@@ -57,7 +57,7 @@ Stage-0
                                 Select Operator [SEL_11] (rows=1200 width=107)
                                   Output:["_col0"]
                                   Filter Operator [FIL_55] (rows=1200 width=107)
-                                    predicate:(((((hd_buy_potential = '1001-5000') or (hd_buy_potential = '5001-10000')) and (hd_vehicle_count > 0)) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.2)) ELSE (null) END) and hd_demo_sk is not null)
+                                    predicate:(((hd_buy_potential = '1001-5000') or (hd_buy_potential = '5001-10000')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.2)) ELSE (null) END and hd_demo_sk is not null)
                                     TableScan [TS_9] (rows=7200 width=107)
                                       default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_buy_potential","hd_dep_count","hd_vehicle_count"]
                             <-Reducer 3 [SIMPLE_EDGE]
@@ -85,7 +85,7 @@ Stage-0
                                         Select Operator [SEL_2] (rows=1 width=0)
                                           Output:["_col0","_col1","_col2","_col3","_col4"]
                                           Filter Operator [FIL_52] (rows=1 width=0)
-                                            predicate:(((ss_sold_date_sk is not null and ss_store_sk is not null) and ss_hdemo_sk is not null) and ss_customer_sk is not null)
+                                            predicate:(ss_sold_date_sk is not null and ss_store_sk is not null and ss_hdemo_sk is not null and ss_customer_sk is not null)
                                             TableScan [TS_0] (rows=1 width=0)
                                               default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk","ss_hdemo_sk","ss_store_sk","ss_ticket_number"]
                                     <-Map 8 [SIMPLE_EDGE]
@@ -94,7 +94,7 @@ Stage-0
                                         Select Operator [SEL_5] (rows=36524 width=1119)
                                           Output:["_col0"]
                                           Filter Operator [FIL_53] (rows=36524 width=1119)
-                                            predicate:(((d_year) IN (1998, 1999, 2000) and (d_dom BETWEEN 1 AND 3 or d_dom BETWEEN 25 AND 28)) and d_date_sk is not null)
+                                            predicate:((d_year) IN (1998, 1999, 2000) and (d_dom BETWEEN 1 AND 3 or d_dom BETWEEN 25 AND 28) and d_date_sk is not null)
                                             TableScan [TS_3] (rows=73049 width=1119)
                                               default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_dom"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query39.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query39.q.out b/ql/src/test/results/clientpositive/perf/query39.q.out
index fb77c7e..52c6b7c 100644
--- a/ql/src/test/results/clientpositive/perf/query39.q.out
+++ b/ql/src/test/results/clientpositive/perf/query39.q.out
@@ -56,7 +56,7 @@ Stage-0
                                     Select Operator [SEL_39] (rows=18262 width=1119)
                                       Output:["_col0"]
                                       Filter Operator [FIL_96] (rows=18262 width=1119)
-                                        predicate:(((d_year = 1999) and (d_moy = 4)) and d_date_sk is not null)
+                                        predicate:((d_year = 1999) and (d_moy = 4) and d_date_sk is not null)
                                         TableScan [TS_37] (rows=73049 width=1119)
                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                 <-Reducer 13 [SIMPLE_EDGE]
@@ -84,7 +84,7 @@ Stage-0
                                             Select Operator [SEL_30] (rows=1 width=0)
                                               Output:["_col0","_col1","_col2","_col3"]
                                               Filter Operator [FIL_93] (rows=1 width=0)
-                                                predicate:((inv_item_sk is not null and inv_warehouse_sk is not null) and inv_date_sk is not null)
+                                                predicate:(inv_item_sk is not null and inv_warehouse_sk is not null and inv_date_sk is not null)
                                                 TableScan [TS_28] (rows=1 width=0)
                                                   default@inventory,inventory,Tbl:PARTIAL,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_warehouse_sk","inv_quantity_on_hand"]
                                         <-Map 16 [SIMPLE_EDGE]
@@ -122,7 +122,7 @@ Stage-0
                                     Select Operator [SEL_11] (rows=18262 width=1119)
                                       Output:["_col0"]
                                       Filter Operator [FIL_92] (rows=18262 width=1119)
-                                        predicate:(((d_year = 1999) and (d_moy = 3)) and d_date_sk is not null)
+                                        predicate:((d_year = 1999) and (d_moy = 3) and d_date_sk is not null)
                                         TableScan [TS_9] (rows=73049 width=1119)
                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                 <-Reducer 3 [SIMPLE_EDGE]
@@ -150,7 +150,7 @@ Stage-0
                                             Select Operator [SEL_2] (rows=1 width=0)
                                               Output:["_col0","_col1","_col2","_col3"]
                                               Filter Operator [FIL_89] (rows=1 width=0)
-                                                predicate:((inv_item_sk is not null and inv_warehouse_sk is not null) and inv_date_sk is not null)
+                                                predicate:(inv_item_sk is not null and inv_warehouse_sk is not null and inv_date_sk is not null)
                                                 TableScan [TS_0] (rows=1 width=0)
                                                   default@inventory,inventory,Tbl:PARTIAL,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_warehouse_sk","inv_quantity_on_hand"]
                                         <-Map 8 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query40.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query40.q.out b/ql/src/test/results/clientpositive/perf/query40.q.out
index 5e2ad72..34ceb71 100644
--- a/ql/src/test/results/clientpositive/perf/query40.q.out
+++ b/ql/src/test/results/clientpositive/perf/query40.q.out
@@ -83,7 +83,7 @@ Stage-0
                                       Select Operator [SEL_2] (rows=1 width=0)
                                         Output:["_col0","_col1","_col2","_col3","_col4"]
                                         Filter Operator [FIL_50] (rows=1 width=0)
-                                          predicate:((cs_warehouse_sk is not null and cs_item_sk is not null) and cs_sold_date_sk is not null)
+                                          predicate:(cs_warehouse_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                           TableScan [TS_0] (rows=1 width=0)
                                             default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_warehouse_sk","cs_item_sk","cs_order_number","cs_sales_price"]
                                   <-Map 8 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query42.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query42.q.out b/ql/src/test/results/clientpositive/perf/query42.q.out
index 28d11df..1640d4e 100644
--- a/ql/src/test/results/clientpositive/perf/query42.q.out
+++ b/ql/src/test/results/clientpositive/perf/query42.q.out
@@ -55,7 +55,7 @@ Stage-0
                                 Select Operator [SEL_2] (rows=18262 width=1119)
                                   Output:["_col0"]
                                   Filter Operator [FIL_30] (rows=18262 width=1119)
-                                    predicate:(((d_moy = 12) and (d_year = 1998)) and d_date_sk is not null)
+                                    predicate:((d_moy = 12) and (d_year = 1998) and d_date_sk is not null)
                                     TableScan [TS_0] (rows=73049 width=1119)
                                       default@date_dim,dt,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                             <-Map 6 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query45.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query45.q.out b/ql/src/test/results/clientpositive/perf/query45.q.out
index 2985ba9..17bc5ee 100644
--- a/ql/src/test/results/clientpositive/perf/query45.q.out
+++ b/ql/src/test/results/clientpositive/perf/query45.q.out
@@ -74,7 +74,7 @@ Stage-0
                                 Select Operator [SEL_11] (rows=18262 width=1119)
                                   Output:["_col0"]
                                   Filter Operator [FIL_65] (rows=18262 width=1119)
-                                    predicate:(((d_qoy = 2) and (d_year = 2000)) and d_date_sk is not null)
+                                    predicate:((d_qoy = 2) and (d_year = 2000) and d_date_sk is not null)
                                     TableScan [TS_9] (rows=73049 width=1119)
                                       default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                             <-Reducer 3 [SIMPLE_EDGE]
@@ -102,7 +102,7 @@ Stage-0
                                         Select Operator [SEL_2] (rows=1 width=0)
                                           Output:["_col0","_col1","_col2","_col3"]
                                           Filter Operator [FIL_62] (rows=1 width=0)
-                                            predicate:((ws_bill_customer_sk is not null and ws_sold_date_sk is not null) and ws_item_sk is not null)
+                                            predicate:(ws_bill_customer_sk is not null and ws_sold_date_sk is not null and ws_item_sk is not null)
                                             TableScan [TS_0] (rows=1 width=0)
                                               default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_bill_customer_sk","ws_sales_price"]
                                     <-Map 8 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query46.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query46.q.out b/ql/src/test/results/clientpositive/perf/query46.q.out
index 62d7e21..2bd87aa 100644
--- a/ql/src/test/results/clientpositive/perf/query46.q.out
+++ b/ql/src/test/results/clientpositive/perf/query46.q.out
@@ -119,7 +119,7 @@ Stage-0
                                                     Select Operator [SEL_2] (rows=1 width=0)
                                                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                                                       Filter Operator [FIL_78] (rows=1 width=0)
-                                                        predicate:((((ss_sold_date_sk is not null and ss_store_sk is not null) and ss_hdemo_sk is not null) and ss_addr_sk is not null) and ss_customer_sk is not null)
+                                                        predicate:(ss_sold_date_sk is not null and ss_store_sk is not null and ss_hdemo_sk is not null and ss_addr_sk is not null and ss_customer_sk is not null)
                                                         TableScan [TS_0] (rows=1 width=0)
                                                           default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_ticket_number","ss_coupon_amt","ss_net_profit"]
                                                 <-Map 10 [SIMPLE_EDGE]
@@ -128,7 +128,7 @@ Stage-0
                                                     Select Operator [SEL_5] (rows=18262 width=1119)
                                                       Output:["_col0"]
                                                       Filter Operator [FIL_79] (rows=18262 width=1119)
-                                                        predicate:(((d_dow) IN (6, 0) and (d_year) IN (1998, 1999, 2000)) and d_date_sk is not null)
+                                                        predicate:((d_dow) IN (6, 0) and (d_year) IN (1998, 1999, 2000) and d_date_sk is not null)
                                                         TableScan [TS_3] (rows=73049 width=1119)
                                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_dow"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query48.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query48.q.out b/ql/src/test/results/clientpositive/perf/query48.q.out
index d536bb5..d93fc07 100644
--- a/ql/src/test/results/clientpositive/perf/query48.q.out
+++ b/ql/src/test/results/clientpositive/perf/query48.q.out
@@ -49,7 +49,7 @@ Stage-0
                           Select Operator [SEL_11] (rows=10000000 width=1014)
                             Output:["_col0","_col1"]
                             Filter Operator [FIL_52] (rows=10000000 width=1014)
-                              predicate:(((ca_state) IN ('KY', 'GA', 'NM', 'MT', 'OR', 'IN', 'WI', 'MO', 'WV') and (ca_country = 'United States')) and ca_address_sk is not null)
+                              predicate:((ca_state) IN ('KY', 'GA', 'NM', 'MT', 'OR', 'IN', 'WI', 'MO', 'WV') and (ca_country = 'United States') and ca_address_sk is not null)
                               TableScan [TS_9] (rows=40000000 width=1014)
                                 default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state","ca_country"]
                       <-Reducer 3 [SIMPLE_EDGE]
@@ -63,7 +63,7 @@ Stage-0
                               Select Operator [SEL_8] (rows=4950 width=362)
                                 Output:["_col0"]
                                 Filter Operator [FIL_51] (rows=4950 width=362)
-                                  predicate:(((cd_marital_status = 'M') and (cd_education_status = '4 yr Degree')) and cd_demo_sk is not null)
+                                  predicate:((cd_marital_status = 'M') and (cd_education_status = '4 yr Degree') and cd_demo_sk is not null)
                                   TableScan [TS_6] (rows=19800 width=362)
                                     default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status","cd_education_status"]
                           <-Reducer 2 [SIMPLE_EDGE]
@@ -77,7 +77,7 @@ Stage-0
                                   Select Operator [SEL_2] (rows=1 width=0)
                                     Output:["_col0","_col1","_col2","_col3","_col4","_col6"]
                                     Filter Operator [FIL_49] (rows=1 width=0)
-                                      predicate:((((((ss_sales_price BETWEEN 100.0 AND 150.0 or ss_sales_price BETWEEN 50.0 AND 100.0 or ss_sales_price BETWEEN 150.0 AND 200.0) and (ss_net_profit BETWEEN 0 AND 2000 or ss_net_profit BETWEEN 150 AND 3000 or ss_net_profit BETWEEN 50 AND 25000)) and ss_store_sk is not null) and ss_cdemo_sk is not null) and ss_addr_sk is not null) and ss_sold_date_sk is not null)
+                                      predicate:((ss_sales_price BETWEEN 100.0 AND 150.0 or ss_sales_price BETWEEN 50.0 AND 100.0 or ss_sales_price BETWEEN 150.0 AND 200.0) and (ss_net_profit BETWEEN 0 AND 2000 or ss_net_profit BETWEEN 150 AND 3000 or ss_net_profit BETWEEN 50 AND 25000) and ss_store_sk is not null and ss_cdemo_sk is not null and ss_addr_sk is not null and ss_sold_date_sk is not null)
                                       TableScan [TS_0] (rows=1 width=0)
                                         default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_cdemo_sk","ss_addr_sk","ss_store_sk","ss_quantity","ss_sales_price","ss_net_profit"]
                               <-Map 7 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query50.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query50.q.out b/ql/src/test/results/clientpositive/perf/query50.q.out
index 4445e98..e6ba451 100644
--- a/ql/src/test/results/clientpositive/perf/query50.q.out
+++ b/ql/src/test/results/clientpositive/perf/query50.q.out
@@ -153,7 +153,7 @@ Stage-0
                           Select Operator [SEL_14] (rows=18262 width=1119)
                             Output:["_col0"]
                             Filter Operator [FIL_55] (rows=18262 width=1119)
-                              predicate:(((d_year = 2000) and (d_moy = 9)) and d_date_sk is not null)
+                              predicate:((d_year = 2000) and (d_moy = 9) and d_date_sk is not null)
                               TableScan [TS_12] (rows=73049 width=1119)
                                 default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                       <-Reducer 4 [SIMPLE_EDGE]
@@ -195,7 +195,7 @@ Stage-0
                                       Select Operator [SEL_2] (rows=1 width=0)
                                         Output:["_col0","_col1","_col2","_col3","_col4"]
                                         Filter Operator [FIL_51] (rows=1 width=0)
-                                          predicate:((((ss_item_sk is not null and ss_customer_sk is not null) and ss_ticket_number is not null) and ss_store_sk is not null) and ss_sold_date_sk is not null)
+                                          predicate:(ss_item_sk is not null and ss_customer_sk is not null and ss_ticket_number is not null and ss_store_sk is not null and ss_sold_date_sk is not null)
                                           TableScan [TS_0] (rows=1 width=0)
                                             default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number"]
                                   <-Map 8 [SIMPLE_EDGE]
@@ -204,7 +204,7 @@ Stage-0
                                       Select Operator [SEL_5] (rows=1 width=0)
                                         Output:["_col0","_col1","_col2","_col3"]
                                         Filter Operator [FIL_52] (rows=1 width=0)
-                                          predicate:(((sr_item_sk is not null and sr_customer_sk is not null) and sr_ticket_number is not null) and sr_returned_date_sk is not null)
+                                          predicate:(sr_item_sk is not null and sr_customer_sk is not null and sr_ticket_number is not null and sr_returned_date_sk is not null)
                                           TableScan [TS_3] (rows=1 width=0)
                                             default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query52.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query52.q.out b/ql/src/test/results/clientpositive/perf/query52.q.out
index f5bc52b..7bf7317 100644
--- a/ql/src/test/results/clientpositive/perf/query52.q.out
+++ b/ql/src/test/results/clientpositive/perf/query52.q.out
@@ -55,7 +55,7 @@ Stage-0
                                 Select Operator [SEL_2] (rows=18262 width=1119)
                                   Output:["_col0"]
                                   Filter Operator [FIL_30] (rows=18262 width=1119)
-                                    predicate:(((d_moy = 12) and (d_year = 1998)) and d_date_sk is not null)
+                                    predicate:((d_moy = 12) and (d_year = 1998) and d_date_sk is not null)
                                     TableScan [TS_0] (rows=73049 width=1119)
                                       default@date_dim,dt,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                             <-Map 6 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query54.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query54.q.out b/ql/src/test/results/clientpositive/perf/query54.q.out
index 9a0e9b4..3edf749 100644
--- a/ql/src/test/results/clientpositive/perf/query54.q.out
+++ b/ql/src/test/results/clientpositive/perf/query54.q.out
@@ -85,7 +85,7 @@ Stage-0
                                           Select Operator [SEL_36] (rows=40000000 width=1014)
                                             Output:["_col0","_col1","_col2"]
                                             Filter Operator [FIL_117] (rows=40000000 width=1014)
-                                              predicate:((ca_address_sk is not null and ca_county is not null) and ca_state is not null)
+                                              predicate:(ca_address_sk is not null and ca_county is not null and ca_state is not null)
                                               TableScan [TS_34] (rows=40000000 width=1014)
                                                 default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county","ca_state"]
                                       <-Reducer 7 [SIMPLE_EDGE]
@@ -134,7 +134,7 @@ Stage-0
                                                             Select Operator [SEL_13] (rows=18262 width=1119)
                                                               Output:["_col0"]
                                                               Filter Operator [FIL_114] (rows=18262 width=1119)
-                                                                predicate:(((d_moy = 3) and (d_year = 2000)) and d_date_sk is not null)
+                                                                predicate:((d_moy = 3) and (d_year = 2000) and d_date_sk is not null)
                                                                 TableScan [TS_11] (rows=73049 width=1119)
                                                                   default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                                         <-Reducer 3 [SIMPLE_EDGE]
@@ -148,7 +148,7 @@ Stage-0
                                                                 Select Operator [SEL_10] (rows=115500 width=1436)
                                                                   Output:["_col0"]
                                                                   Filter Operator [FIL_113] (rows=115500 width=1436)
-                                                                    predicate:(((i_category = 'Jewelry') and (i_class = 'football')) and i_item_sk is not null)
+                                                                    predicate:((i_category = 'Jewelry') and (i_class = 'football') and i_item_sk is not null)
                                                                     TableScan [TS_8] (rows=462000 width=1436)
                                                                       default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_class","i_category"]
                                                             <-Union 2 [SIMPLE_EDGE]
@@ -158,7 +158,7 @@ Stage-0
                                                                   Select Operator [SEL_2] (rows=1 width=0)
                                                                     Output:["_col0","_col1","_col2"]
                                                                     Filter Operator [FIL_111] (rows=1 width=0)
-                                                                      predicate:((cs_item_sk is not null and cs_sold_date_sk is not null) and cs_bill_customer_sk is not null)
+                                                                      predicate:(cs_item_sk is not null and cs_sold_date_sk is not null and cs_bill_customer_sk is not null)
                                                                       TableScan [TS_0] (rows=1 width=0)
                                                                         Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk"]
                                                               <-Map 14 [CONTAINS]
@@ -167,7 +167,7 @@ Stage-0
                                                                   Select Operator [SEL_5] (rows=1 width=0)
                                                                     Output:["_col0","_col1","_col2"]
                                                                     Filter Operator [FIL_112] (rows=1 width=0)
-                                                                      predicate:((ws_item_sk is not null and ws_sold_date_sk is not null) and ws_bill_customer_sk is not null)
+                                                                      predicate:(ws_item_sk is not null and ws_sold_date_sk is not null and ws_bill_customer_sk is not null)
                                                                       TableScan [TS_3] (rows=1 width=0)
                                                                         Output:["ws_sold_date_sk","ws_item_sk","ws_bill_customer_sk"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query55.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query55.q.out b/ql/src/test/results/clientpositive/perf/query55.q.out
index 91d0c72..a9044c1 100644
--- a/ql/src/test/results/clientpositive/perf/query55.q.out
+++ b/ql/src/test/results/clientpositive/perf/query55.q.out
@@ -53,7 +53,7 @@ Stage-0
                               Select Operator [SEL_2] (rows=18262 width=1119)
                                 Output:["_col0"]
                                 Filter Operator [FIL_30] (rows=18262 width=1119)
-                                  predicate:(((d_moy = 12) and (d_year = 2001)) and d_date_sk is not null)
+                                  predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
                                   TableScan [TS_0] (rows=73049 width=1119)
                                     default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                           <-Map 6 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query64.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query64.q.out b/ql/src/test/results/clientpositive/perf/query64.q.out
index dddcc80..ff81523 100644
--- a/ql/src/test/results/clientpositive/perf/query64.q.out
+++ b/ql/src/test/results/clientpositive/perf/query64.q.out
@@ -84,7 +84,7 @@ Stage-0
                                   Select Operator [SEL_76] (rows=57750 width=1436)
                                     Output:["_col0","_col3"]
                                     Filter Operator [FIL_660] (rows=57750 width=1436)
-                                      predicate:((((i_color) IN ('maroon', 'burnished', 'dim', 'steel', 'navajo', 'chocolate') and i_current_price BETWEEN 35 AND 45) and i_current_price BETWEEN 36 AND 50) and i_item_sk is not null)
+                                      predicate:((i_color) IN ('maroon', 'burnished', 'dim', 'steel', 'navajo', 'chocolate') and i_current_price BETWEEN 35 AND 45 and i_current_price BETWEEN 36 AND 50 and i_item_sk is not null)
                                       TableScan [TS_74] (rows=462000 width=1436)
                                         default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price","i_color","i_product_name"]
                               <-Reducer 16 [SIMPLE_EDGE]
@@ -228,7 +228,7 @@ Stage-0
                                                                               Select Operator [SEL_20] (rows=1704 width=1910)
                                                                                 Output:["_col0","_col1","_col2"]
                                                                                 Filter Operator [FIL_650] (rows=1704 width=1910)
-                                                                                  predicate:((s_store_sk is not null and s_store_name is not null) and s_zip is not null)
+                                                                                  predicate:(s_store_sk is not null and s_store_name is not null and s_zip is not null)
                                                                                   TableScan [TS_18] (rows=1704 width=1910)
                                                                                     default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_zip"]
                                                                           <-Reducer 6 [SIMPLE_EDGE]
@@ -284,7 +284,7 @@ Stage-0
                                                                                               Select Operator [SEL_8] (rows=80000000 width=860)
                                                                                                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                                                                                 Filter Operator [FIL_646] (rows=80000000 width=860)
-                                                                                                  predicate:(((((c_customer_sk is not null and c_first_sales_date_sk is not null) and c_first_shipto_date_sk is not null) and c_current_cdemo_sk is not null) and c_current_hdemo_sk is not null) and c_current_addr_sk is not null)
+                                                                                                  predicate:(c_customer_sk is not null and c_first_sales_date_sk is not null and c_first_shipto_date_sk is not null and c_current_cdemo_sk is not null and c_current_hdemo_sk is not null and c_current_addr_sk is not null)
                                                                                                   TableScan [TS_6] (rows=80000000 width=860)
                                                                                                     default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_hdemo_sk","c_current_addr_sk","c_first_shipto_date_sk","c_first_sales_date_sk"]
                                                                                           <-Reducer 2 [SIMPLE_EDGE]
@@ -298,7 +298,7 @@ Stage-0
                                                                                                   Select Operator [SEL_2] (rows=1 width=0)
                                                                                                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
                                                                                                     Filter Operator [FIL_644] (rows=1 width=0)
-                                                                                                      predicate:((((((((ss_item_sk is not null and ss_ticket_number is not null) and ss_customer_sk is not null) and ss_sold_date_sk is not null) and ss_store_sk is not null) and ss_cdemo_sk is not null) and ss_promo_sk is not null) and ss_hdemo_sk is not null) and ss_addr_sk is not null)
+                                                                                                      predicate:(ss_item_sk is not null and ss_ticket_number is not null and ss_customer_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_cdemo_sk is not null and ss_promo_sk is not null and ss_hdemo_sk is not null and ss_addr_sk is not null)
                                                                                                       TableScan [TS_0] (rows=1 width=0)
                                                                                                         default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_cdemo_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_promo_sk","ss_ticket_number","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]
                                                                                               <-Map 21 [SIMPLE_EDGE]
@@ -368,7 +368,7 @@ Stage-0
                                   Select Operator [SEL_200] (rows=57750 width=1436)
                                     Output:["_col0","_col3"]
                                     Filter Operator [FIL_679] (rows=57750 width=1436)
-                                      predicate:((((i_color) IN ('maroon', 'burnished', 'dim', 'steel', 'navajo', 'chocolate') and i_current_price BETWEEN 35 AND 45) and i_current_price BETWEEN 36 AND 50) and i_item_sk is not null)
+                                      predicate:((i_color) IN ('maroon', 'burnished', 'dim', 'steel', 'navajo', 'chocolate') and i_current_price BETWEEN 35 AND 45 and i_current_price BETWEEN 36 AND 50 and i_item_sk is not null)
                                       TableScan [TS_198] (rows=462000 width=1436)
                                         default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price","i_color","i_product_name"]
                               <-Reducer 56 [SIMPLE_EDGE]
@@ -512,7 +512,7 @@ Stage-0
                                                                               Select Operator [SEL_144] (rows=1704 width=1910)
                                                                                 Output:["_col0","_col1","_col2"]
                                                                                 Filter Operator [FIL_669] (rows=1704 width=1910)
-                                                                                  predicate:((s_store_sk is not null and s_store_name is not null) and s_zip is not null)
+                                                                                  predicate:(s_store_sk is not null and s_store_name is not null and s_zip is not null)
                                                                                   TableScan [TS_142] (rows=1704 width=1910)
                                                                                     default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_zip"]
                                                                           <-Reducer 46 [SIMPLE_EDGE]
@@ -568,7 +568,7 @@ Stage-0
                                                                                               Select Operator [SEL_132] (rows=80000000 width=860)
                                                                                                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                                                                                 Filter Operator [FIL_665] (rows=80000000 width=860)
-                                                                                                  predicate:(((((c_customer_sk is not null and c_first_sales_date_sk is not null) and c_first_shipto_date_sk is not null) and c_current_cdemo_sk is not null) and c_current_hdemo_sk is not null) and c_current_addr_sk is not null)
+                                                                                                  predicate:(c_customer_sk is not null and c_first_sales_date_sk is not null and c_first_shipto_date_sk is not null and c_current_cdemo_sk is not null and c_current_hdemo_sk is not null and c_current_addr_sk is not null)
                                                                                                   TableScan [TS_130] (rows=80000000 width=860)
                                                                                                     default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_hdemo_sk","c_current_addr_sk","c_first_shipto_date_sk","c_first_sales_date_sk"]
                                                                                           <-Reducer 42 [SIMPLE_EDGE]
@@ -582,7 +582,7 @@ Stage-0
                                                                                                   Select Operator [SEL_126] (rows=1 width=0)
                                                                                                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
                                                                                                     Filter Operator [FIL_663] (rows=1 width=0)
-                                                                                                      predicate:((((((((ss_item_sk is not null and ss_ticket_number is not null) and ss_customer_sk is not null) and ss_sold_date_sk is not null) and ss_store_sk is not null) and ss_cdemo_sk is not null) and ss_promo_sk is not null) and ss_hdemo_sk is not null) and ss_addr_sk is not null)
+                                                                                                      predicate:(ss_item_sk is not null and ss_ticket_number is not null and ss_customer_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_cdemo_sk is not null and ss_promo_sk is not null and ss_hdemo_sk is not null and ss_addr_sk is not null)
                                                                                                       TableScan [TS_124] (rows=1 width=0)
                                                                                                         default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_cdemo_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_promo_sk","ss_ticket_number","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]
                                                                                               <-Map 59 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query65.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query65.q.out b/ql/src/test/results/clientpositive/perf/query65.q.out
index 37bb1b3..9673373 100644
--- a/ql/src/test/results/clientpositive/perf/query65.q.out
+++ b/ql/src/test/results/clientpositive/perf/query65.q.out
@@ -125,7 +125,7 @@ Stage-0
                                   Select Operator [SEL_27] (rows=1 width=0)
                                     Output:["_col0","_col1","_col2","_col3"]
                                     Filter Operator [FIL_68] (rows=1 width=0)
-                                      predicate:((ss_sold_date_sk is not null and ss_store_sk is not null) and ss_item_sk is not null)
+                                      predicate:(ss_sold_date_sk is not null and ss_store_sk is not null and ss_item_sk is not null)
                                       TableScan [TS_25] (rows=1 width=0)
                                         default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_sales_price"]
                               <-Map 14 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query66.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query66.q.out b/ql/src/test/results/clientpositive/perf/query66.q.out
index 4127631..d698602 100644
--- a/ql/src/test/results/clientpositive/perf/query66.q.out
+++ b/ql/src/test/results/clientpositive/perf/query66.q.out
@@ -535,7 +535,7 @@ Stage-0
                                                   Select Operator [SEL_35] (rows=1 width=0)
                                                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
                                                     Filter Operator [FIL_110] (rows=1 width=0)
-                                                      predicate:(((cs_warehouse_sk is not null and cs_sold_date_sk is not null) and cs_sold_time_sk is not null) and cs_ship_mode_sk is not null)
+                                                      predicate:(cs_warehouse_sk is not null and cs_sold_date_sk is not null and cs_sold_time_sk is not null and cs_ship_mode_sk is not null)
                                                       TableScan [TS_33] (rows=1 width=0)
                                                         default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_ship_mode_sk","cs_warehouse_sk","cs_quantity","cs_ext_sales_price","cs_net_paid_inc_ship_tax"]
                                               <-Map 20 [SIMPLE_EDGE]
@@ -615,7 +615,7 @@ Stage-0
                                                   Select Operator [SEL_2] (rows=1 width=0)
                                                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
                                                     Filter Operator [FIL_105] (rows=1 width=0)
-                                                      predicate:(((ws_warehouse_sk is not null and ws_sold_date_sk is not null) and ws_sold_time_sk is not null) and ws_ship_mode_sk is not null)
+                                                      predicate:(ws_warehouse_sk is not null and ws_sold_date_sk is not null and ws_sold_time_sk is not null and ws_ship_mode_sk is not null)
                                                       TableScan [TS_0] (rows=1 width=0)
                                                         default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_ship_mode_sk","ws_warehouse_sk","ws_quantity","ws_sales_price","ws_net_paid_inc_tax"]
                                               <-Map 10 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query67.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query67.q.out b/ql/src/test/results/clientpositive/perf/query67.q.out
index 83dab38..4a1e7a6 100644
--- a/ql/src/test/results/clientpositive/perf/query67.q.out
+++ b/ql/src/test/results/clientpositive/perf/query67.q.out
@@ -164,7 +164,7 @@ Stage-0
                                               Select Operator [SEL_2] (rows=1 width=0)
                                                 Output:["_col0","_col1","_col2","_col3","_col4"]
                                                 Filter Operator [FIL_48] (rows=1 width=0)
-                                                  predicate:((ss_sold_date_sk is not null and ss_store_sk is not null) and ss_item_sk is not null)
+                                                  predicate:(ss_sold_date_sk is not null and ss_store_sk is not null and ss_item_sk is not null)
                                                   TableScan [TS_0] (rows=1 width=0)
                                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_quantity","ss_sales_price"]
                                           <-Map 8 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query68.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query68.q.out b/ql/src/test/results/clientpositive/perf/query68.q.out
index 38e4644..df253ae 100644
--- a/ql/src/test/results/clientpositive/perf/query68.q.out
+++ b/ql/src/test/results/clientpositive/perf/query68.q.out
@@ -119,7 +119,7 @@ Stage-0
                                                     Select Operator [SEL_2] (rows=1 width=0)
                                                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
                                                       Filter Operator [FIL_78] (rows=1 width=0)
-                                                        predicate:((((ss_sold_date_sk is not null and ss_store_sk is not null) and ss_hdemo_sk is not null) and ss_addr_sk is not null) and ss_customer_sk is not null)
+                                                        predicate:(ss_sold_date_sk is not null and ss_store_sk is not null and ss_hdemo_sk is not null and ss_addr_sk is not null and ss_customer_sk is not null)
                                                         TableScan [TS_0] (rows=1 width=0)
                                                           default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_ticket_number","ss_ext_sales_price","ss_ext_list_price","ss_ext_tax"]
                                                 <-Map 10 [SIMPLE_EDGE]
@@ -128,7 +128,7 @@ Stage-0
                                                     Select Operator [SEL_5] (rows=18262 width=1119)
                                                       Output:["_col0"]
                                                       Filter Operator [FIL_79] (rows=18262 width=1119)
-                                                        predicate:(((d_year) IN (1998, 1999, 2000) and d_dom BETWEEN 1 AND 2) and d_date_sk is not null)
+                                                        predicate:((d_year) IN (1998, 1999, 2000) and d_dom BETWEEN 1 AND 2 and d_date_sk is not null)
                                                         TableScan [TS_3] (rows=73049 width=1119)
                                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_dom"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query7.q.out b/ql/src/test/results/clientpositive/perf/query7.q.out
index b5a8254..be336ac 100644
--- a/ql/src/test/results/clientpositive/perf/query7.q.out
+++ b/ql/src/test/results/clientpositive/perf/query7.q.out
@@ -83,7 +83,7 @@ Stage-0
                                       Select Operator [SEL_2] (rows=1 width=0)
                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                                         Filter Operator [FIL_50] (rows=1 width=0)
-                                          predicate:(((ss_cdemo_sk is not null and ss_sold_date_sk is not null) and ss_item_sk is not null) and ss_promo_sk is not null)
+                                          predicate:(ss_cdemo_sk is not null and ss_sold_date_sk is not null and ss_item_sk is not null and ss_promo_sk is not null)
                                           TableScan [TS_0] (rows=1 width=0)
                                             default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_cdemo_sk","ss_promo_sk","ss_quantity","ss_list_price","ss_sales_price","ss_coupon_amt"]
                                   <-Map 8 [SIMPLE_EDGE]
@@ -92,7 +92,7 @@ Stage-0
                                       Select Operator [SEL_5] (rows=2475 width=362)
                                         Output:["_col0"]
                                         Filter Operator [FIL_51] (rows=2475 width=362)
-                                          predicate:((((cd_gender = 'F') and (cd_marital_status = 'W')) and (cd_education_status = 'Primary')) and cd_demo_sk is not null)
+                                          predicate:((cd_gender = 'F') and (cd_marital_status = 'W') and (cd_education_status = 'Primary') and cd_demo_sk is not null)
                                           TableScan [TS_3] (rows=19800 width=362)
                                             default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query71.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query71.q.out b/ql/src/test/results/clientpositive/perf/query71.q.out
index 504705b..190dc22 100644
--- a/ql/src/test/results/clientpositive/perf/query71.q.out
+++ b/ql/src/test/results/clientpositive/perf/query71.q.out
@@ -71,7 +71,7 @@ Stage-0
                                     Select Operator [SEL_15] (rows=1 width=0)
                                       Output:["_col0","_col1","_col2","_col3"]
                                       Filter Operator [FIL_78] (rows=1 width=0)
-                                        predicate:((cs_sold_date_sk is not null and cs_item_sk is not null) and cs_sold_time_sk is not null)
+                                        predicate:(cs_sold_date_sk is not null and cs_item_sk is not null and cs_sold_time_sk is not null)
                                         TableScan [TS_13] (rows=1 width=0)
                                           default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_sold_time_sk","cs_item_sk","cs_ext_sales_price"]
                                 <-Map 12 [SIMPLE_EDGE]
@@ -80,7 +80,7 @@ Stage-0
                                     Select Operator [SEL_18] (rows=18262 width=1119)
                                       Output:["_col0"]
                                       Filter Operator [FIL_79] (rows=18262 width=1119)
-                                        predicate:(((d_moy = 12) and (d_year = 2001)) and d_date_sk is not null)
+                                        predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
                                         TableScan [TS_16] (rows=73049 width=1119)
                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                           <-Reducer 14 [CONTAINS]
@@ -96,7 +96,7 @@ Stage-0
                                     Select Operator [SEL_27] (rows=1 width=0)
                                       Output:["_col0","_col1","_col2","_col3"]
                                       Filter Operator [FIL_80] (rows=1 width=0)
-                                        predicate:((ss_sold_date_sk is not null and ss_item_sk is not null) and ss_sold_time_sk is not null)
+                                        predicate:(ss_sold_date_sk is not null and ss_item_sk is not null and ss_sold_time_sk is not null)
                                         TableScan [TS_25] (rows=1 width=0)
                                           default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_sold_time_sk","ss_item_sk","ss_ext_sales_price"]
                                 <-Map 15 [SIMPLE_EDGE]
@@ -105,7 +105,7 @@ Stage-0
                                     Select Operator [SEL_30] (rows=18262 width=1119)
                                       Output:["_col0"]
                                       Filter Operator [FIL_81] (rows=18262 width=1119)
-                                        predicate:(((d_moy = 12) and (d_year = 2001)) and d_date_sk is not null)
+                                        predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
                                         TableScan [TS_28] (rows=73049 width=1119)
                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                           <-Reducer 7 [CONTAINS]
@@ -121,7 +121,7 @@ Stage-0
                                     Select Operator [SEL_5] (rows=1 width=0)
                                       Output:["_col0","_col1","_col2","_col3"]
                                       Filter Operator [FIL_76] (rows=1 width=0)
-                                        predicate:((ws_sold_date_sk is not null and ws_item_sk is not null) and ws_sold_time_sk is not null)
+                                        predicate:(ws_sold_date_sk is not null and ws_item_sk is not null and ws_sold_time_sk is not null)
                                         TableScan [TS_3] (rows=1 width=0)
                                           default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_sold_time_sk","ws_item_sk","ws_ext_sales_price"]
                                 <-Map 9 [SIMPLE_EDGE]
@@ -130,7 +130,7 @@ Stage-0
                                     Select Operator [SEL_8] (rows=18262 width=1119)
                                       Output:["_col0"]
                                       Filter Operator [FIL_77] (rows=18262 width=1119)
-                                        predicate:(((d_moy = 12) and (d_year = 2001)) and d_date_sk is not null)
+                                        predicate:((d_moy = 12) and (d_year = 2001) and d_date_sk is not null)
                                         TableScan [TS_6] (rows=73049 width=1119)
                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query72.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query72.q.out b/ql/src/test/results/clientpositive/perf/query72.q.out
index 6d8bd36..5c35a17 100644
--- a/ql/src/test/results/clientpositive/perf/query72.q.out
+++ b/ql/src/test/results/clientpositive/perf/query72.q.out
@@ -105,7 +105,7 @@ Stage-0
                                               Select Operator [SEL_25] (rows=36524 width=1119)
                                                 Output:["_col0","_col1","_col2"]
                                                 Filter Operator [FIL_128] (rows=36524 width=1119)
-                                                  predicate:(((d_year = 2001) and d_date_sk is not null) and d_week_seq is not null)
+                                                  predicate:((d_year = 2001) and d_date_sk is not null and d_week_seq is not null)
                                                   TableScan [TS_23] (rows=73049 width=1119)
                                                     default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date","d_week_seq","d_year"]
                                           <-Reducer 6 [SIMPLE_EDGE]
@@ -179,7 +179,7 @@ Stage-0
                                                                       Select Operator [SEL_2] (rows=1 width=0)
                                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                                                                         Filter Operator [FIL_122] (rows=1 width=0)
-                                                                          predicate:((((cs_item_sk is not null and cs_bill_cdemo_sk is not null) and cs_bill_hdemo_sk is not null) and cs_sold_date_sk is not null) and cs_ship_date_sk is not null)
+                                                                          predicate:(cs_item_sk is not null and cs_bill_cdemo_sk is not null and cs_bill_hdemo_sk is not null and cs_sold_date_sk is not null and cs_ship_date_sk is not null)
                                                                           TableScan [TS_0] (rows=1 width=0)
                                                                             default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_ship_date_sk","cs_bill_cdemo_sk","cs_bill_hdemo_sk","cs_item_sk","cs_promo_sk","cs_order_number","cs_quantity"]
                                                                   <-Map 14 [SIMPLE_EDGE]
@@ -188,7 +188,7 @@ Stage-0
                                                                       Select Operator [SEL_5] (rows=1 width=0)
                                                                         Output:["_col0","_col1","_col2","_col3"]
                                                                         Filter Operator [FIL_123] (rows=1 width=0)
-                                                                          predicate:((inv_item_sk is not null and inv_warehouse_sk is not null) and inv_date_sk is not null)
+                                                                          predicate:(inv_item_sk is not null and inv_warehouse_sk is not null and inv_date_sk is not null)
                                                                           TableScan [TS_3] (rows=1 width=0)
                                                                             default@inventory,inventory,Tbl:PARTIAL,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_warehouse_sk","inv_quantity_on_hand"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query73.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query73.q.out b/ql/src/test/results/clientpositive/perf/query73.q.out
index cf3a75e..cb22c8a 100644
--- a/ql/src/test/results/clientpositive/perf/query73.q.out
+++ b/ql/src/test/results/clientpositive/perf/query73.q.out
@@ -57,7 +57,7 @@ Stage-0
                                 Select Operator [SEL_11] (rows=1200 width=107)
                                   Output:["_col0"]
                                   Filter Operator [FIL_55] (rows=1200 width=107)
-                                    predicate:(((((hd_buy_potential = '1001-5000') or (hd_buy_potential = '5001-10000')) and (hd_vehicle_count > 0)) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.0)) ELSE (null) END) and hd_demo_sk is not null)
+                                    predicate:(((hd_buy_potential = '1001-5000') or (hd_buy_potential = '5001-10000')) and (hd_vehicle_count > 0) and CASE WHEN ((hd_vehicle_count > 0)) THEN (((UDFToDouble(hd_dep_count) / UDFToDouble(hd_vehicle_count)) > 1.0)) ELSE (null) END and hd_demo_sk is not null)
                                     TableScan [TS_9] (rows=7200 width=107)
                                       default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_buy_potential","hd_dep_count","hd_vehicle_count"]
                             <-Reducer 3 [SIMPLE_EDGE]
@@ -85,7 +85,7 @@ Stage-0
                                         Select Operator [SEL_2] (rows=1 width=0)
                                           Output:["_col0","_col1","_col2","_col3","_col4"]
                                           Filter Operator [FIL_52] (rows=1 width=0)
-                                            predicate:(((ss_sold_date_sk is not null and ss_store_sk is not null) and ss_hdemo_sk is not null) and ss_customer_sk is not null)
+                                            predicate:(ss_sold_date_sk is not null and ss_store_sk is not null and ss_hdemo_sk is not null and ss_customer_sk is not null)
                                             TableScan [TS_0] (rows=1 width=0)
                                               default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk","ss_hdemo_sk","ss_store_sk","ss_ticket_number"]
                                     <-Map 8 [SIMPLE_EDGE]
@@ -94,7 +94,7 @@ Stage-0
                                         Select Operator [SEL_5] (rows=18262 width=1119)
                                           Output:["_col0"]
                                           Filter Operator [FIL_53] (rows=18262 width=1119)
-                                            predicate:(((d_year) IN (1998, 1999, 2000) and d_dom BETWEEN 1 AND 2) and d_date_sk is not null)
+                                            predicate:((d_year) IN (1998, 1999, 2000) and d_dom BETWEEN 1 AND 2 and d_date_sk is not null)
                                             TableScan [TS_3] (rows=73049 width=1119)
                                               default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_dom"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query75.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query75.q.out b/ql/src/test/results/clientpositive/perf/query75.q.out
index 25a8776..35729a2 100644
--- a/ql/src/test/results/clientpositive/perf/query75.q.out
+++ b/ql/src/test/results/clientpositive/perf/query75.q.out
@@ -106,7 +106,7 @@ Stage-0
                                               Select Operator [SEL_79] (rows=231000 width=1436)
                                                 Output:["_col0","_col1","_col2","_col3","_col5"]
                                                 Filter Operator [FIL_230] (rows=231000 width=1436)
-                                                  predicate:((((((i_category = 'Sports') and i_item_sk is not null) and i_brand_id is not null) and i_class_id is not null) and i_category_id is not null) and i_manufact_id is not null)
+                                                  predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                                   TableScan [TS_77] (rows=462000 width=1436)
                                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                         <-Reducer 38 [CONTAINS]
@@ -163,7 +163,7 @@ Stage-0
                                               Select Operator [SEL_101] (rows=231000 width=1436)
                                                 Output:["_col0","_col1","_col2","_col3","_col5"]
                                                 Filter Operator [FIL_234] (rows=231000 width=1436)
-                                                  predicate:((((((i_category = 'Sports') and i_item_sk is not null) and i_brand_id is not null) and i_class_id is not null) and i_category_id is not null) and i_manufact_id is not null)
+                                                  predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                                   TableScan [TS_99] (rows=462000 width=1436)
                                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                         <-Reducer 45 [CONTAINS]
@@ -220,7 +220,7 @@ Stage-0
                                               Select Operator [SEL_125] (rows=231000 width=1436)
                                                 Output:["_col0","_col1","_col2","_col3","_col5"]
                                                 Filter Operator [FIL_238] (rows=231000 width=1436)
-                                                  predicate:((((((i_category = 'Sports') and i_item_sk is not null) and i_brand_id is not null) and i_class_id is not null) and i_category_id is not null) and i_manufact_id is not null)
+                                                  predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                                   TableScan [TS_123] (rows=462000 width=1436)
                                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                   <-Reducer 6 [SIMPLE_EDGE]
@@ -283,7 +283,7 @@ Stage-0
                                               Select Operator [SEL_27] (rows=231000 width=1436)
                                                 Output:["_col0","_col1","_col2","_col3","_col5"]
                                                 Filter Operator [FIL_222] (rows=231000 width=1436)
-                                                  predicate:((((((i_category = 'Sports') and i_item_sk is not null) and i_brand_id is not null) and i_class_id is not null) and i_category_id is not null) and i_manufact_id is not null)
+                                                  predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                                   TableScan [TS_25] (rows=462000 width=1436)
                                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                         <-Reducer 22 [CONTAINS]
@@ -340,7 +340,7 @@ Stage-0
                                               Select Operator [SEL_51] (rows=231000 width=1436)
                                                 Output:["_col0","_col1","_col2","_col3","_col5"]
                                                 Filter Operator [FIL_226] (rows=231000 width=1436)
-                                                  predicate:((((((i_category = 'Sports') and i_item_sk is not null) and i_brand_id is not null) and i_class_id is not null) and i_category_id is not null) and i_manufact_id is not null)
+                                                  predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                                   TableScan [TS_49] (rows=462000 width=1436)
                                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
                         <-Reducer 4 [CONTAINS]
@@ -397,7 +397,7 @@ Stage-0
                                               Select Operator [SEL_5] (rows=231000 width=1436)
                                                 Output:["_col0","_col1","_col2","_col3","_col5"]
                                                 Filter Operator [FIL_218] (rows=231000 width=1436)
-                                                  predicate:((((((i_category = 'Sports') and i_item_sk is not null) and i_brand_id is not null) and i_class_id is not null) and i_category_id is not null) and i_manufact_id is not null)
+                                                  predicate:((i_category = 'Sports') and i_item_sk is not null and i_brand_id is not null and i_class_id is not null and i_category_id is not null and i_manufact_id is not null)
                                                   TableScan [TS_3] (rows=462000 width=1436)
                                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand_id","i_class_id","i_category_id","i_category","i_manufact_id"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query76.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query76.q.out b/ql/src/test/results/clientpositive/perf/query76.q.out
index c90578b..170c18e 100644
--- a/ql/src/test/results/clientpositive/perf/query76.q.out
+++ b/ql/src/test/results/clientpositive/perf/query76.q.out
@@ -67,7 +67,7 @@ Stage-0
                                 Select Operator [SEL_18] (rows=1 width=0)
                                   Output:["_col0","_col1","_col3"]
                                   Filter Operator [FIL_78] (rows=1 width=0)
-                                    predicate:((ws_web_page_sk is null and ws_item_sk is not null) and ws_sold_date_sk is not null)
+                                    predicate:(ws_web_page_sk is null and ws_item_sk is not null and ws_sold_date_sk is not null)
                                     TableScan [TS_16] (rows=1 width=0)
                                       default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_web_page_sk","ws_ext_sales_price"]
                 <-Reducer 16 [CONTAINS]
@@ -99,7 +99,7 @@ Stage-0
                                 Select Operator [SEL_36] (rows=1 width=0)
                                   Output:["_col0","_col2","_col3"]
                                   Filter Operator [FIL_81] (rows=1 width=0)
-                                    predicate:((cs_warehouse_sk is null and cs_item_sk is not null) and cs_sold_date_sk is not null)
+                                    predicate:(cs_warehouse_sk is null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                     TableScan [TS_34] (rows=1 width=0)
                                       default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_warehouse_sk","cs_item_sk","cs_ext_sales_price"]
                             <-Map 17 [SIMPLE_EDGE]
@@ -140,7 +140,7 @@ Stage-0
                                 Select Operator [SEL_2] (rows=1 width=0)
                                   Output:["_col0","_col1","_col3"]
                                   Filter Operator [FIL_75] (rows=1 width=0)
-                                    predicate:((ss_addr_sk is null and ss_item_sk is not null) and ss_sold_date_sk is not null)
+                                    predicate:(ss_addr_sk is null and ss_item_sk is not null and ss_sold_date_sk is not null)
                                     TableScan [TS_0] (rows=1 width=0)
                                       default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_addr_sk","ss_ext_sales_price"]
                             <-Map 7 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query79.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query79.q.out b/ql/src/test/results/clientpositive/perf/query79.q.out
index bf537b9..a17eb84 100644
--- a/ql/src/test/results/clientpositive/perf/query79.q.out
+++ b/ql/src/test/results/clientpositive/perf/query79.q.out
@@ -87,7 +87,7 @@ Stage-0
                                           Select Operator [SEL_2] (rows=1 width=0)
                                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                                             Filter Operator [FIL_52] (rows=1 width=0)
-                                              predicate:(((ss_sold_date_sk is not null and ss_store_sk is not null) and ss_hdemo_sk is not null) and ss_customer_sk is not null)
+                                              predicate:(ss_sold_date_sk is not null and ss_store_sk is not null and ss_hdemo_sk is not null and ss_customer_sk is not null)
                                               TableScan [TS_0] (rows=1 width=0)
                                                 default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_ticket_number","ss_coupon_amt","ss_net_profit"]
                                       <-Map 8 [SIMPLE_EDGE]
@@ -96,7 +96,7 @@ Stage-0
                                           Select Operator [SEL_5] (rows=18262 width=1119)
                                             Output:["_col0"]
                                             Filter Operator [FIL_53] (rows=18262 width=1119)
-                                              predicate:(((d_year) IN (1998, 1999, 2000) and (d_dow = 1)) and d_date_sk is not null)
+                                              predicate:((d_year) IN (1998, 1999, 2000) and (d_dow = 1) and d_date_sk is not null)
                                               TableScan [TS_3] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_dow"]
 


[24/58] [abbrv] hive git commit: HIVE-11806 : Create test for HIVE11174 (Vikram Dixit via Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-11806 : Create test for HIVE11174 (Vikram Dixit via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4eef55b9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4eef55b9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4eef55b9

Branch: refs/heads/llap
Commit: 4eef55b943baa63248aa750aa8e05f5d8a0df4ad
Parents: 58dcd76
Author: Vikram Dixit K <vi...@apache.org>
Authored: Fri Sep 11 16:00:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Mon Apr 11 23:23:50 2016 -0700

----------------------------------------------------------------------
 .../test/queries/clientpositive/float_equality.q   |  3 +++
 .../results/clientpositive/float_equality.q.out    | 17 +++++++++++++++++
 2 files changed, 20 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4eef55b9/ql/src/test/queries/clientpositive/float_equality.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/float_equality.q b/ql/src/test/queries/clientpositive/float_equality.q
new file mode 100644
index 0000000..216ce40
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/float_equality.q
@@ -0,0 +1,3 @@
+select 1 where 0.0=-0.0;
+
+select 1 where -0.0<0.0;

http://git-wip-us.apache.org/repos/asf/hive/blob/4eef55b9/ql/src/test/results/clientpositive/float_equality.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/float_equality.q.out b/ql/src/test/results/clientpositive/float_equality.q.out
new file mode 100644
index 0000000..319c6e7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/float_equality.q.out
@@ -0,0 +1,17 @@
+PREHOOK: query: select 1 where 0.0=-0.0
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select 1 where 0.0=-0.0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+1
+PREHOOK: query: select 1 where -0.0<0.0
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
+POSTHOOK: query: select 1 where -0.0<0.0
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+#### A masked pattern was here ####
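
For context on what these golden results pin down: under IEEE 754, positive and negative zero compare equal with '=' (so the first query returns a row), and '-0.0 < 0.0' is false (so the second returns nothing), whereas Java's Double.compare does order -0.0 before 0.0, which is where a comparator-based code path could diverge. A minimal, self-contained Java sketch of that distinction (illustrative only, not Hive code; the class name is ours):

public class ZeroEquality {
  public static void main(String[] args) {
    System.out.println(0.0 == -0.0);               // true: primitive '==' follows IEEE 754 equality
    System.out.println(-0.0 < 0.0);                // false: there is no ordering between the two zeros
    System.out.println(Double.compare(-0.0, 0.0)); // -1: a comparator-based path distinguishes them
  }
}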


[49/58] [abbrv] hive git commit: HIVE-13522 : regexp_extract.q hangs on master (Ashutosh Chauhan via Thejas Nair)

Posted by jd...@apache.org.
HIVE-13522 : regexp_extract.q hangs on master (Ashutosh Chauhan via Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d567773f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d567773f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d567773f

Branch: refs/heads/llap
Commit: d567773ff4afe3a23a026e2f4e381c0fe897195b
Parents: cedb6de
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Thu Apr 14 14:57:39 2016 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Apr 14 14:57:39 2016 -0700

----------------------------------------------------------------------
 itests/qtest/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d567773f/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index a479557..b042774 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -431,7 +431,7 @@
                   templatePath="${basedir}/${hive.path.to.root}/ql/src/test/templates/" template="TestCliDriver.vm"
                   queryDirectory="${basedir}/${hive.path.to.root}/ql/src/test/queries/clientpositive/"
                   queryFile="${qfile}"
-                  excludeQueryFile="${minillap.query.files},${minimr.query.files},${minitez.query.files},${encrypted.query.files},${spark.only.query.files},${disabled.query.files}"
+                  excludeQueryFile="${minillap.query.files},${minimr.query.files},${minitez.query.files},${encrypted.query.files},${spark.only.query.files},${disabled.query.files},regexp_extract.q"
                   queryFileRegex="${qfile_regex}"
                   clusterMode="${clustermode}"
                   runDisabled="${run_disabled}"


[12/58] [abbrv] hive git commit: HIVE-13320 : Apply HIVE-11544 to explicit conversions as well as implicit ones (Nita Dembla, Gopal V via Gunther Hagleitner) Signed-off-by: Ashutosh Chauhan

Posted by jd...@apache.org.
HIVE-13320 : Apply HIVE-11544 to explicit conversions as well as implicit ones (Nita Dembla, Gopal V via Gunther Hagleitner)
Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e98f7ac5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e98f7ac5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e98f7ac5

Branch: refs/heads/llap
Commit: e98f7ac555c946c481e8796c9b2d40b790a19b22
Parents: eb56666
Author: Nita Dembla <nd...@hortonworks.com>
Authored: Thu Apr 7 09:54:00 2016 -0500
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Apr 9 15:31:31 2016 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java    | 7 +++++--
 ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java  | 4 ++++
 ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java   | 4 ++++
 ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java | 4 ++++
 ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java    | 4 ++++
 ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java   | 4 ++++
 6 files changed, 25 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
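
The hunks below add the same guard to each string-to-numeric cast UDF: a cheap LazyUtils.isNumberMaybe pre-check short-circuits to NULL before any parse is attempted, so clearly non-numeric strings no longer pay the cost of a thrown NumberFormatException, while malformed-but-plausible input still falls back to NULL (MySQL-style) in the catch block. A minimal, self-contained sketch of that pattern, under the assumption that a byte-level "could this be a number" test is enough; looksLikeNumber and toInt are hypothetical stand-ins, not Hive's actual helpers:

public class GuardedParse {
  // Hypothetical stand-in for LazyUtils.isNumberMaybe: accept only bytes that
  // could plausibly appear in a numeric literal.
  static boolean looksLikeNumber(byte[] bytes, int start, int len) {
    for (int i = start; i < start + len; i++) {
      byte b = bytes[i];
      boolean ok = (b >= '0' && b <= '9') || b == '-' || b == '+' || b == '.'
          || b == 'e' || b == 'E';
      if (!ok) {
        return false;
      }
    }
    return len > 0;
  }

  static Integer toInt(byte[] bytes) {
    if (!looksLikeNumber(bytes, 0, bytes.length)) {
      return null;                       // skip the exception path entirely
    }
    try {
      return Integer.parseInt(new String(bytes, java.nio.charset.StandardCharsets.US_ASCII));
    } catch (NumberFormatException e) {
      return null;                       // malformed numeric value still yields NULL
    }
  }

  public static void main(String[] args) {
    System.out.println(toInt("123".getBytes()));   // 123
    System.out.println(toInt("abc".getBytes()));   // null, without throwing
  }
}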


http://git-wip-us.apache.org/repos/asf/hive/blob/e98f7ac5/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java
index 159dd0f..efae82d 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToByte.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 import org.apache.hadoop.hive.serde2.lazy.LazyByte;
+import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
@@ -166,9 +167,11 @@ public class UDFToByte extends UDF {
     if (i == null) {
       return null;
     } else {
+      if (!LazyUtils.isNumberMaybe(i.getBytes(), 0, i.getLength())) {
+        return null;
+      }
       try {
-        byteWritable
-            .set(LazyByte.parseByte(i.getBytes(), 0, i.getLength(), 10));
+        byteWritable.set(LazyByte.parseByte(i.getBytes(), 0, i.getLength(), 10));
         return byteWritable;
       } catch (NumberFormatException e) {
         // MySQL returns 0 if the string is not a well-formed numeric value.

http://git-wip-us.apache.org/repos/asf/hive/blob/e98f7ac5/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
index 5763947..e932f11 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToDouble.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
@@ -164,6 +165,9 @@ public class UDFToDouble extends UDF {
     if (i == null) {
       return null;
     } else {
+      if (!LazyUtils.isNumberMaybe(i.getBytes(), 0, i.getLength())) {
+        return null;
+      }
       try {
         doubleWritable.set(Double.valueOf(i.toString()));
         return doubleWritable;

http://git-wip-us.apache.org/repos/asf/hive/blob/e98f7ac5/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
index e2183f4..119eaca 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToFloat.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hive.serde2.io.DoubleWritable;
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
+import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
@@ -165,6 +166,9 @@ public class UDFToFloat extends UDF {
     if (i == null) {
       return null;
     } else {
+      if (!LazyUtils.isNumberMaybe(i.getBytes(), 0, i.getLength())) {
+        return null;
+      }
       try {
         floatWritable.set(Float.valueOf(i.toString()));
         return floatWritable;

http://git-wip-us.apache.org/repos/asf/hive/blob/e98f7ac5/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
index 5f5d1fe..fc6540e 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToInteger.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 import org.apache.hadoop.hive.serde2.lazy.LazyInteger;
+import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
@@ -167,6 +168,9 @@ public class UDFToInteger extends UDF {
     if (i == null) {
       return null;
     } else {
+      if (!LazyUtils.isNumberMaybe(i.getBytes(), 0, i.getLength())) {
+        return null;
+      }
       try {
         intWritable.set(LazyInteger
             .parseInt(i.getBytes(), 0, i.getLength(), 10));

http://git-wip-us.apache.org/repos/asf/hive/blob/e98f7ac5/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java
index 3eeabea..3d85abd 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToLong.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 import org.apache.hadoop.hive.serde2.lazy.LazyLong;
+import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
@@ -177,6 +178,9 @@ public class UDFToLong extends UDF {
     if (i == null) {
       return null;
     } else {
+      if (!LazyUtils.isNumberMaybe(i.getBytes(), 0, i.getLength())) {
+        return null;
+      }
       try {
         longWritable
             .set(LazyLong.parseLong(i.getBytes(), 0, i.getLength(), 10));

http://git-wip-us.apache.org/repos/asf/hive/blob/e98f7ac5/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java
index b9065b2..24533d6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFToShort.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.serde2.io.ShortWritable;
 import org.apache.hadoop.hive.serde2.io.TimestampWritable;
 import org.apache.hadoop.hive.serde2.lazy.LazyShort;
+import org.apache.hadoop.hive.serde2.lazy.LazyUtils;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.FloatWritable;
 import org.apache.hadoop.io.IntWritable;
@@ -167,6 +168,9 @@ public class UDFToShort extends UDF {
     if (i == null) {
       return null;
     } else {
+      if (!LazyUtils.isNumberMaybe(i.getBytes(), 0, i.getLength())) {
+        return null;
+      }
       try {
         shortWritable.set(LazyShort.parseShort(i.getBytes(), 0, i.getLength(),
             10));


[03/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
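
The predicate strings in the golden files below (and in the earlier perf query updates) change from left-deep nested pairs such as ((a and b) and c) to a single flat conjunction (a and b and c). A minimal sketch of that flattening over a toy expression tree; the Expr class here is illustrative only, not Hive's ExprNodeDesc or the optimizer's actual code.

import java.util.ArrayList;
import java.util.List;

public class FlattenAnd {
  // Toy expression node: op is "AND" for conjunctions, otherwise a leaf predicate string.
  static class Expr {
    final String op;
    final List<Expr> children;
    Expr(String op, List<Expr> children) { this.op = op; this.children = children; }
    static Expr leaf(String text) { return new Expr(text, new ArrayList<>()); }
    static Expr and(Expr... cs) { return new Expr("AND", List.of(cs)); }
  }

  // Collect every non-AND descendant so nested ANDs collapse into one flat list.
  static void collect(Expr e, List<Expr> out) {
    if ("AND".equals(e.op)) {
      for (Expr c : e.children) {
        collect(c, out);
      }
    } else {
      out.add(e);
    }
  }

  static Expr flatten(Expr e) {
    if (!"AND".equals(e.op)) {
      return e;
    }
    List<Expr> conjuncts = new ArrayList<>();
    collect(e, conjuncts);
    return new Expr("AND", conjuncts);
  }

  public static void main(String[] args) {
    Expr nested = Expr.and(
        Expr.and(Expr.leaf("ss_sold_date_sk is not null"), Expr.leaf("ss_store_sk is not null")),
        Expr.leaf("ss_hdemo_sk is not null"));
    Expr flat = flatten(nested);
    System.out.println(flat.children.size() + " conjuncts in one multi-AND"); // 3
  }
}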
http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
index 9cab06c..c8f6cd7 100644
--- a/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_vectorized_dynamic_partition_pruning.q.out
@@ -58,10 +58,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
@@ -71,6 +71,7 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -89,7 +90,6 @@ STAGE PLANS:
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                         name: default.srcpart_date
-            Execution mode: vectorized
 
   Stage: Stage-0
     Move Operator
@@ -210,16 +210,20 @@ STAGE PLANS:
                       expressions: ds (type: string)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
             Execution mode: vectorized
 
   Stage: Stage-1
@@ -235,11 +239,15 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -249,11 +257,15 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
@@ -261,8 +273,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -274,6 +286,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -287,7 +300,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -336,11 +348,15 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -350,11 +366,15 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
@@ -362,8 +382,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -375,6 +395,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -388,7 +409,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -459,40 +479,49 @@ STAGE PLANS:
                       expressions: ds (type: string)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
             Execution mode: vectorized
         Map 8 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: hr (type: string)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: hr
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: hr
+                            Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
+            Execution mode: vectorized
 
   Stage: Stage-1
     Spark
@@ -507,12 +536,16 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: hr (type: string)
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -522,40 +555,49 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: hr (type: string)
-                      sort order: +
-                      Map-reduce partition columns: hr (type: string)
+                    Select Operator
+                      expressions: hr (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
-                outputColumnNames: _col3
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col3 (type: string)
+                  key expressions: _col1 (type: string)
                   sort order: +
-                  Map-reduce partition columns: _col3 (type: string)
+                  Map-reduce partition columns: _col1 (type: string)
                   Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
@@ -563,8 +605,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col3 (type: string)
-                  1 hr (type: string)
+                  0 _col1 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -576,6 +618,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -589,7 +632,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -645,12 +687,16 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: (ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: hr (type: string)
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col1 (type: string)
         Map 5 
             Map Operator Tree:
                 TableScan
@@ -660,40 +706,49 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = '2008-04-08')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: hr (type: string)
-                      sort order: +
-                      Map-reduce partition columns: hr (type: string)
+                    Select Operator
+                      expressions: hr (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
-                outputColumnNames: _col3
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col3 (type: string)
+                  key expressions: _col1 (type: string)
                   sort order: +
-                  Map-reduce partition columns: _col3 (type: string)
+                  Map-reduce partition columns: _col1 (type: string)
                   Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
             Reduce Operator Tree:
@@ -701,8 +756,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 _col3 (type: string)
-                  1 hr (type: string)
+                  0 _col1 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -714,6 +769,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -727,7 +783,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -789,48 +844,58 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: ds (type: string)
-                      outputColumnNames: _col0
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
+            Execution mode: vectorized
         Map 6 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: hr (type: string)
-                      outputColumnNames: _col0
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col2 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: hr
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: hr
+                            Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
+            Execution mode: vectorized
 
   Stage: Stage-1
     Spark
@@ -844,33 +909,42 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string), hr (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: ds (type: string), hr (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string), hr (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: ds (type: string), hr (type: string)
+                    Select Operator
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col2 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col2 (type: string)
+                        Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string), hr (type: string)
-                  1 ds (type: string), hr (type: string)
+                  0 _col0 (type: string), _col1 (type: string)
+                  1 _col0 (type: string), _col2 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -882,6 +956,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -895,7 +970,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -944,33 +1018,42 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: (ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string), hr (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: ds (type: string), hr (type: string)
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                  filterExpr: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean)
+                    predicate: (ds is not null and hr is not null and (date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string), hr (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: ds (type: string), hr (type: string)
+                    Select Operator
+                      expressions: ds (type: string), hr (type: string)
+                      outputColumnNames: _col0, _col2
                       Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col2 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col2 (type: string)
+                        Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string), hr (type: string)
-                  1 ds (type: string), hr (type: string)
+                  0 _col0 (type: string), _col1 (type: string)
+                  1 _col0 (type: string), _col2 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -982,6 +1065,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -995,7 +1079,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -1062,16 +1145,20 @@ STAGE PLANS:
                       expressions: ds (type: string)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: ds
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                          target column name: ds
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: ds
+                            Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                            target column name: ds
+                            target work: Map 1
             Execution mode: vectorized
 
   Stage: Stage-1
@@ -1087,11 +1174,15 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1101,11 +1192,15 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
@@ -1113,8 +1208,8 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1126,6 +1221,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1139,7 +1235,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -1188,11 +1283,15 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1202,20 +1301,24 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean)
                     Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: ds (type: string)
-                      sort order: +
-                      Map-reduce partition columns: ds (type: string)
+                    Select Operator
+                      expressions: ds (type: string)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
-            Execution mode: vectorized
-        Reducer 2 
-            Reduce Operator Tree:
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
+        Reducer 2 
+            Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 ds (type: string)
-                  1 ds (type: string)
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
                 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
@@ -1227,6 +1330,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1240,7 +1344,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -1296,25 +1399,30 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+                      expressions: hr (type: double)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
+                      Select Operator
+                        expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: UDFToDouble(hr)
+                        Group By Operator
+                          keys: _col0 (type: double)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: UDFToDouble(hr)
+                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
+            Execution mode: vectorized
 
   Stage: Stage-1
     Spark
@@ -1327,39 +1435,45 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: UDFToDouble(hr) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: UDFToDouble(hr) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToDouble(hr) (type: double)
+                      key expressions: UDFToDouble(_col0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: UDFToDouble(hr) (type: double)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      sort order: +
-                      Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                        sort order: +
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToDouble(hr) (type: double)
-                  1 UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 UDFToDouble(_col0) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1370,6 +1484,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1383,7 +1498,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -1428,25 +1542,30 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: hr (type: double)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: double)
-                        mode: hash
+                      Select Operator
+                        expressions: _col0 (type: double)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: (hr * 2)
+                        Group By Operator
+                          keys: _col0 (type: double)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: (UDFToDouble(hr) * 2.0)
+                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
+            Execution mode: vectorized
 
   Stage: Stage-1
     Spark
@@ -1459,39 +1578,45 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: (hr * 2) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (hr * 2) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (hr * 2) (type: double)
+                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (hr * 2) (type: double)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: hr (type: double)
-                      sort order: +
-                      Map-reduce partition columns: hr (type: double)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: double)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: double)
+                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (hr * 2) (type: double)
-                  1 hr (type: double)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  1 _col0 (type: double)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1502,6 +1627,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1515,7 +1641,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -1562,39 +1687,45 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: UDFToDouble(hr) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: UDFToDouble(hr) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToDouble(hr) (type: double)
+                      key expressions: UDFToDouble(_col0) (type: double)
                       sort order: +
-                      Map-reduce partition columns: UDFToDouble(hr) (type: double)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: UDFToDouble(_col0) (type: double)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      sort order: +
-                      Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                        sort order: +
+                        Map-reduce partition columns: UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToDouble(hr) (type: double)
-                  1 UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 UDFToDouble(_col0) (type: double)
+                  1 UDFToDouble(UDFToInteger((_col0 / 2.0))) (type: double)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1605,6 +1736,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1618,7 +1750,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -1665,39 +1796,45 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: (hr * 2) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (hr * 2) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: (hr * 2) (type: double)
+                      key expressions: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
                       sort order: +
-                      Map-reduce partition columns: (hr * 2) (type: double)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (hr is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (hr is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: hr (type: double)
-                      sort order: +
-                      Map-reduce partition columns: hr (type: double)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: double)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: double)
+                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 (hr * 2) (type: double)
-                  1 hr (type: double)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 (UDFToDouble(_col0) * UDFToDouble(2)) (type: double)
+                  1 _col0 (type: double)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1708,6 +1845,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1721,7 +1859,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -1779,25 +1916,30 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: UDFToString(hr) (type: string)
+                      expressions: hr (type: double)
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
+                      Select Operator
+                        expressions: UDFToString(_col0) (type: string)
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                        Spark Partition Pruning Sink Operator
-                          partition key expr: UDFToString((hr * 2))
+                        Group By Operator
+                          keys: _col0 (type: string)
+                          mode: hash
+                          outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                          target column name: hr
-                          target work: Map 1
+                          Spark Partition Pruning Sink Operator
+                            partition key expr: UDFToString((UDFToDouble(hr) * 2.0))
+                            Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+                            target column name: hr
+                            target work: Map 1
+            Execution mode: vectorized
 
   Stage: Stage-1
     Spark
@@ -1810,39 +1952,45 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: UDFToString((hr * 2)) is not null (type: boolean)
+                  filterExpr: hr is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: UDFToString((hr * 2)) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: hr (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: UDFToString((hr * 2)) (type: string)
+                      key expressions: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
                       sort order: +
-                      Map-reduce partition columns: UDFToString((hr * 2)) (type: string)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Map-reduce partition columns: UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_double_hour
-                  filterExpr: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+                  filterExpr: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (UDFToString(hr) is not null and (hour = 11)) (type: boolean)
+                    predicate: (hr is not null and (UDFToDouble(hour) = 11.0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToString(hr) (type: string)
-                      sort order: +
-                      Map-reduce partition columns: UDFToString(hr) (type: string)
+                    Select Operator
+                      expressions: hr (type: double)
+                      outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: UDFToString(_col0) (type: string)
+                        sort order: +
+                        Map-reduce partition columns: UDFToString(_col0) (type: string)
+                        Statistics: Num rows: 1 Data size: 94 Basic stats: COMPLETE Column stats: NONE
+            Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 UDFToString((hr * 2)) (type: string)
-                  1 UDFToString(hr) (type: string)
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 UDFToString((UDFToDouble(_col0) * UDFToDouble(2))) (type: string)
+                  1 UDFToString(_col0) (type: string)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1853,6 +2001,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1866,7 +2015,6 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -1906,7 +2054,6 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -1914,15 +2061,65 @@ POSTHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
+  Stage-2 is a root stage
+  Stage-1 depends on stages: Stage-2
   Stage-0 depends on stages: Stage-1
 
 STAGE PLANS:
+  Stage: Stage-2
+    Spark
+      Edges:
+        Reducer 7 <- Map 6 (GROUP, 2)
+#### A masked pattern was here ####
+      Vertices:
+        Map 6 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart
+                  filterExpr: (ds = '2008-04-08') (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: '2008-04-08' (type: string)
+                    outputColumnNames: ds
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: ds (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+        Reducer 7 
+            Execution mode: vectorized
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Spark Partition Pruning Sink Operator
+                      partition key expr: ds
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      target column name: ds
+                      target work: Map 1
+
   Stage: Stage-1
     Spark
       Edges:
         Reducer 5 <- Map 4 (GROUP, 2)
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 1), Reducer 5 (PARTITION-LEVEL SORT, 1)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Reducer 5 (PARTITION-LEVEL SORT, 2)
         Reducer 3 <- Reducer 2 (GROUP, 1)
 #### A masked pattern was here ####
       Vertices:
@@ -1930,13 +2127,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  filterExpr: (ds = '2008-04-08') (type: boolean)
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  filterExpr: ds is not null (type: boolean)
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    expressions: ds (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      sort order: 
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4 
             Map Operator Tree:
                 TableScan
@@ -1945,10 +2146,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1963,9 +2164,9 @@ STAGE PLANS:
                 condition map:
                      Inner Join 0 to 1
                 keys:
-                  0 
-                  1 
-                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count()
                   mode: hash
@@ -1976,6 +2177,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1989,20 +2191,19 @@ STAGE PLANS:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-            Execution mode: vectorized
         Reducer 5 
+            Execution mode: vectorized
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Execution mode: vectorized
 
   Stage: Stage-0
     Fetch Operator
@@ -2010,18 +2211,21 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[13][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1000
 PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08'
@@ -2037,7 +2241,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join JOIN[4][tables = [srcpart, srcpart_date_hour]] in Work 'Reducer 2' is a cross product
+Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: -- non-equi join
 EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
@@ -2061,23 +2265,32 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    sort order: 
+                  Select Operator
+                    expressions: ds (type: string), hr (type: string)
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: ds (type: string), hr (type: string)
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      value expressions: _col0 (type: string), _col1 (type: string)
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((date = '2008-04-08') and (hour = 11)) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-               

<TRUNCATED>

[06/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
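
For context on the golden-file updates below: the change flattens the generated
not-null filters into a single multi-AND instead of nesting binary ANDs. A minimal
sketch of the kind of plan difference these .q.out diffs record (table and column
names here are hypothetical, not taken from this commit):

    -- join whose equi-keys force generated IS NOT NULL filters
    EXPLAIN
    SELECT a.key
    FROM t1 a JOIN t2 b ON (a.key = b.key AND a.val = b.val)
    WHERE a.key < 10;

    -- before: nested binary ANDs in the Filter Operator
    --   predicate: ((key is not null and val is not null) and (key < 10))
    -- after: one flattened multi-AND
    --   predicate: (key is not null and val is not null and (key < 10))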
http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/skewjoinopt12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoinopt12.q.out b/ql/src/test/results/clientpositive/skewjoinopt12.q.out
index 3244e98..355daa5 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt12.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt12.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13')))) (type: boolean)
+              predicate: (key is not null and val is not null and ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -77,7 +77,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13')))) (type: boolean)
+              predicate: (key is not null and val is not null and ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -135,7 +135,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13'))))) (type: boolean)
+              predicate: (key is not null and val is not null and (not ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13'))))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -150,7 +150,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13'))))) (type: boolean)
+              predicate: (key is not null and val is not null and (not ((((key = '2') and (val = '12')) or ((key = '8') and (val = '18'))) or ((key = '3') and (val = '13'))))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/skewjoinopt14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoinopt14.q.out b/ql/src/test/results/clientpositive/skewjoinopt14.q.out
index 07d210b..a412b5d 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt14.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt14.q.out
@@ -92,7 +92,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (key = '2')) (type: boolean)
+              predicate: (key is not null and val is not null and (key = '2')) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -197,7 +197,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not (key = '2'))) (type: boolean)
+              predicate: (key is not null and val is not null and (not (key = '2'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/skewjoinopt16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoinopt16.q.out b/ql/src/test/results/clientpositive/skewjoinopt16.q.out
index e213cb7..4d388fc 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt16.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt16.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (((key = '2') and (val = '12')) or (key = '3'))) (type: boolean)
+              predicate: (key is not null and val is not null and (((key = '2') and (val = '12')) or (key = '3'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -77,7 +77,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (((key = '2') and (val = '12')) or (key = '3'))) (type: boolean)
+              predicate: (key is not null and val is not null and (((key = '2') and (val = '12')) or (key = '3'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -135,7 +135,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not (((key = '2') and (val = '12')) or (key = '3')))) (type: boolean)
+              predicate: (key is not null and val is not null and (not (((key = '2') and (val = '12')) or (key = '3')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -150,7 +150,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not (((key = '2') and (val = '12')) or (key = '3')))) (type: boolean)
+              predicate: (key is not null and val is not null and (not (((key = '2') and (val = '12')) or (key = '3')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/skewjoinopt17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoinopt17.q.out b/ql/src/test/results/clientpositive/skewjoinopt17.q.out
index 759b9c2..8fb0885 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt17.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt17.q.out
@@ -286,7 +286,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (((key = '2') and (val = '12')) or (key = '2'))) (type: boolean)
+              predicate: (key is not null and val is not null and (((key = '2') and (val = '12')) or (key = '2'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -301,7 +301,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (((key = '2') and (val = '12')) or (key = '2'))) (type: boolean)
+              predicate: (key is not null and val is not null and (((key = '2') and (val = '12')) or (key = '2'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -359,7 +359,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not (((key = '2') and (val = '12')) or (key = '2')))) (type: boolean)
+              predicate: (key is not null and val is not null and (not (((key = '2') and (val = '12')) or (key = '2')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -374,7 +374,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not (((key = '2') and (val = '12')) or (key = '2')))) (type: boolean)
+              predicate: (key is not null and val is not null and (not (((key = '2') and (val = '12')) or (key = '2')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/skewjoinopt2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoinopt2.q.out b/ql/src/test/results/clientpositive/skewjoinopt2.q.out
index 0a16787..860687d 100644
--- a/ql/src/test/results/clientpositive/skewjoinopt2.q.out
+++ b/ql/src/test/results/clientpositive/skewjoinopt2.q.out
@@ -70,7 +70,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
+              predicate: (key is not null and val is not null and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -85,7 +85,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
+              predicate: (key is not null and val is not null and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -143,7 +143,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
+              predicate: (key is not null and val is not null and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -158,7 +158,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
+              predicate: (key is not null and val is not null and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -388,7 +388,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
+              predicate: (key is not null and val is not null and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -403,7 +403,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
+              predicate: (key is not null and val is not null and ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -485,7 +485,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
+              predicate: (key is not null and val is not null and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -500,7 +500,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
+              predicate: (key is not null and val is not null and (not ((((key = '2') or (key = '7')) or (key = '3')) or (key = '8')))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out
index d2cbdd6..85b8a6e 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 3 Data size: 414 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((userid is not null and pageid is not null) and postid is not null) and type is not null) (type: boolean)
+              predicate: (userid is not null and pageid is not null and postid is not null and type is not null) (type: boolean)
               Statistics: Num rows: 3 Data size: 414 Basic stats: COMPLETE Column stats: NONE
               Sorted Merge Bucket Map Join Operator
                 condition map:

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/smb_mapjoin_14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_14.q.out
index b83c736..8632da2 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin_14.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin_14.q.out
@@ -590,7 +590,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean)
+              predicate: ((key < 8) and (key < 6)) (type: boolean)
               Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out
index 4ef2d81..eb29c86 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and value is not null) and (key < 10)) (type: boolean)
+              predicate: (value is not null and (key < 10)) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Sorted Merge Bucket Map Join Operator
                 condition map:

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out
index 34dbe86..599aabf 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and value is not null) and (key < 10)) (type: boolean)
+              predicate: (value is not null and (key < 10)) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Sorted Merge Bucket Map Join Operator
                 condition map:

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out
index a9b5b73..16fffcb 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and value is not null) and (key < 10)) (type: boolean)
+              predicate: (value is not null and (key < 10)) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               HashTable Sink Operator
                 keys:
@@ -92,7 +92,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and value is not null) and (key < 10)) (type: boolean)
+              predicate: (value is not null and (key < 10)) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Map Join Operator
                 condition map:

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out
index c5f0e69..5f1b102 100644
--- a/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out
+++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out
@@ -195,7 +195,7 @@ STAGE PLANS:
             alias: b
             Statistics: Num rows: 500 Data size: 6312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and value2 is not null) and (key < 10)) (type: boolean)
+              predicate: (value2 is not null and (key < 10)) (type: boolean)
               Statistics: Num rows: 166 Data size: 2095 Basic stats: COMPLETE Column stats: NONE
               HashTable Sink Operator
                 keys:
@@ -209,7 +209,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 10218 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and value2 is not null) and (key < 10)) (type: boolean)
+              predicate: (value2 is not null and (key < 10)) (type: boolean)
               Statistics: Num rows: 166 Data size: 3392 Basic stats: COMPLETE Column stats: NONE
               Map Join Operator
                 condition map:

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/auto_join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join16.q.out b/ql/src/test/results/clientpositive/spark/auto_join16.q.out
index e9033cf..6f1ae25 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join16.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join16.q.out
@@ -30,7 +30,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
                     Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -55,7 +55,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
                     Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/auto_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join4.q.out b/ql/src/test/results/clientpositive/spark/auto_join4.q.out
index 7cf582c..3a94961 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join4.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/auto_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join5.q.out b/ql/src/test/results/clientpositive/spark/auto_join5.q.out
index 285450f..55d1e6b 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join5.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/auto_join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join8.q.out b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
index 7c4ed3a..a769f4c 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join8.q.out
@@ -53,7 +53,7 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out b/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out
index d6c5ae3..ea38826 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out
@@ -189,7 +189,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: (((date is not null and dealid is not null) and cityid is not null) and userid is not null) (type: boolean)
+                    predicate: (date is not null and dealid is not null and cityid is not null and userid is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: dealid (type: int), date (type: string), cityid (type: int), userid (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out b/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out
index 0ab1365..85387a7 100644
--- a/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/constprog_semijoin.q.out
@@ -171,7 +171,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((val = 't1val01') and id is not null) and dimid is not null) (type: boolean)
+                    predicate: ((val = 't1val01') and id is not null and dimid is not null) (type: boolean)
                     Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), dimid (type: int)
@@ -303,7 +303,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((val = 't1val01') and dimid is not null) and id is not null) (type: boolean)
+                    predicate: ((val = 't1val01') and dimid is not null and id is not null) (type: boolean)
                     Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), dimid (type: int)
@@ -434,7 +434,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((dimid = 100) = true) and (dimid <> 100)) and (dimid = 100) is not null) (type: boolean)
+                    predicate: (((dimid = 100) = true) and (dimid <> 100) and (dimid = 100) is not null) (type: boolean)
                     Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -452,7 +452,7 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((id = 100) = true) and (id <> 100)) and (id = 100) is not null) (type: boolean)
+                    predicate: (((id = 100) = true) and (id <> 100) and (id = 100) is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), (id = 100) (type: boolean)
@@ -523,7 +523,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((dimid) IN (100, 200) and ((dimid = 100) = true)) and (dimid = 100) is not null) (type: boolean)
+                    predicate: ((dimid) IN (100, 200) and ((dimid = 100) = true) and (dimid = 100) is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -541,7 +541,7 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((id) IN (100, 200) and ((id = 100) = true)) and (id = 100) is not null) (type: boolean)
+                    predicate: ((id) IN (100, 200) and ((id = 100) = true) and (id = 100) is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), (id = 100) (type: boolean)
@@ -614,7 +614,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((dimid = 100) = true) and (dimid = 200)) and (dimid = 100) is not null) (type: boolean)
+                    predicate: (((dimid = 100) = true) and (dimid = 200) and (dimid = 100) is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string)
@@ -632,7 +632,7 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((id = 100) = true) and (id = 200)) and (id = 100) is not null) (type: boolean)
+                    predicate: (((id = 100) = true) and (id = 200) and (id = 100) is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
@@ -701,7 +701,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((dimid = 100) = true) and (dimid = 100)) and (dimid = 100) is not null) (type: boolean)
+                    predicate: (((dimid = 100) = true) and (dimid = 100) and (dimid = 100) is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string)
@@ -719,7 +719,7 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((id = 100) = true) and (id = 100)) and (id = 100) is not null) (type: boolean)
+                    predicate: (((id = 100) = true) and (id = 100) and (id = 100) is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
@@ -790,7 +790,7 @@ STAGE PLANS:
                   alias: table1
                   Statistics: Num rows: 10 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((dimid = 100) = true) and dimid is not null) and (dimid = 100) is not null) (type: boolean)
+                    predicate: (((dimid = 100) = true) and dimid is not null and (dimid = 100) is not null) (type: boolean)
                     Statistics: Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), val (type: string), val1 (type: string), dimid (type: int)
@@ -808,7 +808,7 @@ STAGE PLANS:
                   alias: table3
                   Statistics: Num rows: 5 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((id = 100) = true) and id is not null) and (id = 100) is not null) (type: boolean)
+                    predicate: (((id = 100) = true) and id is not null and (id = 100) is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), (id = 100) (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
index a7c1e78..8163773 100644
--- a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
+++ b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out
@@ -731,7 +731,7 @@ STAGE PLANS:
                   alias: inventory
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: ((inv_item_sk is not null and inv_warehouse_sk is not null) and inv_date_sk is not null) (type: boolean)
+                    predicate: (inv_item_sk is not null and inv_warehouse_sk is not null and inv_date_sk is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: inv_date_sk (type: int), inv_item_sk (type: int), inv_quantity_on_hand (type: int), inv_warehouse_sk (type: int)
@@ -749,7 +749,7 @@ STAGE PLANS:
                   alias: date_dim
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (((d_year = 1999) and (d_moy = 3)) and d_date_sk is not null) (type: boolean)
+                    predicate: ((d_year = 1999) and (d_moy = 3) and d_date_sk is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: d_date_sk (type: int)
@@ -766,7 +766,7 @@ STAGE PLANS:
                   alias: inventory
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: ((inv_item_sk is not null and inv_warehouse_sk is not null) and inv_date_sk is not null) (type: boolean)
+                    predicate: (inv_item_sk is not null and inv_warehouse_sk is not null and inv_date_sk is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: inv_date_sk (type: int), inv_item_sk (type: int), inv_quantity_on_hand (type: int), inv_warehouse_sk (type: int)
@@ -819,7 +819,7 @@ STAGE PLANS:
                   alias: date_dim
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (((d_year = 1999) and (d_moy = 4)) and d_date_sk is not null) (type: boolean)
+                    predicate: ((d_year = 1999) and (d_moy = 4) and d_date_sk is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: d_date_sk (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/groupby_position.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_position.q.out b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
index 69d8e2a..137d6a1 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
@@ -564,7 +564,7 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string), substr(value, 5) (type: string)
@@ -586,7 +586,7 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       keys: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/identity_project_remove_skip.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/identity_project_remove_skip.q.out b/ql/src/test/results/clientpositive/spark/identity_project_remove_skip.q.out
index d334b57..47aee98 100644
--- a/ql/src/test/results/clientpositive/spark/identity_project_remove_skip.q.out
+++ b/ql/src/test/results/clientpositive/spark/identity_project_remove_skip.q.out
@@ -34,7 +34,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and (value = 'val_105')) and (key = '105')) (type: boolean)
+                    predicate: ((value = 'val_105') and (key = '105')) (type: boolean)
                     Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out b/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
index 3cd2340..a541c30 100644
--- a/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out
@@ -25,7 +25,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and value is not null) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -43,7 +43,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and value is not null) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -141,10 +141,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  filterExpr: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and value is not null) (type: boolean)
+                  filterExpr: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and value is not null) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -160,10 +160,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a
-                  filterExpr: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and value is not null) (type: boolean)
+                  filterExpr: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and value is not null) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out b/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
index b660ffe..d0ed328 100644
--- a/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
+++ b/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out
@@ -110,45 +110,45 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 4), Map 4 (PARTITION-LEVEL SORT, 4)
-        Reducer 3 <- Reducer 2 (GROUP, 4)
+        Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2)
+        Reducer 3 <- Reducer 2 (GROUP, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
             Map Operator Tree:
                 TableScan
                   alias: default__src_src1_index__
-                  Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 56811 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) = 0.0) and _bucketname is not null) and _offset is not null) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
+                    Statistics: Num rows: 250 Data size: 28405 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 250 Data size: 28405 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: bigint)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                        Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 250 Data size: 28405 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: array<bigint>)
         Map 4 
             Map Operator Tree:
                 TableScan
                   alias: default__src_src2_index__
-                  Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 500 Data size: 58811 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((value = 'val_0') and _bucketname is not null) and _offset is not null) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
+                    predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
+                    Statistics: Num rows: 250 Data size: 29405 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 250 Data size: 29405 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: bigint)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
-                        Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 250 Data size: 29405 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: array<bigint>)
         Reducer 2 
             Reduce Operator Tree:
@@ -159,25 +159,25 @@ STAGE PLANS:
                   0 _col0 (type: string), _col1 (type: bigint)
                   1 _col0 (type: string), _col1 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col5
-                Statistics: Num rows: 275 Data size: 25470 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 275 Data size: 31245 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (not EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(_col2,_col5))) (type: boolean)
-                  Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 138 Data size: 15679 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 138 Data size: 15679 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: collect_set(_col1)
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 138 Data size: 15679 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 138 Data size: 12781 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 138 Data size: 15679 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: array<bigint>)
         Reducer 3 
             Reduce Operator Tree:
@@ -186,10 +186,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 69 Data size: 7839 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 69 Data size: 6390 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 69 Data size: 7839 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out b/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out
index ec96fc7..54f8d6c 100644
--- a/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out
+++ b/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out
@@ -139,7 +139,7 @@ STAGE PLANS:
                   alias: default__src_src1_index__
                   Statistics: Num rows: 500 Data size: 46311 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) = 0.0) and _bucketname is not null) and _offset is not null) (type: boolean)
+                    predicate: ((UDFToDouble(key) = 0.0) and _bucketname is not null and _offset is not null) (type: boolean)
                     Statistics: Num rows: 250 Data size: 23155 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)
@@ -157,7 +157,7 @@ STAGE PLANS:
                   alias: default__src_src2_index__
                   Statistics: Num rows: 500 Data size: 48311 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((value = 'val_0') and _bucketname is not null) and _offset is not null) (type: boolean)
+                    predicate: ((value = 'val_0') and _bucketname is not null and _offset is not null) (type: boolean)
                     Statistics: Num rows: 250 Data size: 24155 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _bucketname (type: string), _offset (type: bigint), _bitmaps (type: array<bigint>)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/join16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join16.q.out b/ql/src/test/results/clientpositive/spark/join16.q.out
index f64fb98..a85d379 100644
--- a/ql/src/test/results/clientpositive/spark/join16.q.out
+++ b/ql/src/test/results/clientpositive/spark/join16.q.out
@@ -19,7 +19,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
                     Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -36,7 +36,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0)) and (UDFToDouble(value) < 200.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) > 20.0) and (UDFToDouble(value) < 200.0)) (type: boolean)
                     Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/join19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join19.q.out b/ql/src/test/results/clientpositive/spark/join19.q.out
index 8995c97..72dd284 100644
--- a/ql/src/test/results/clientpositive/spark/join19.q.out
+++ b/ql/src/test/results/clientpositive/spark/join19.q.out
@@ -141,7 +141,7 @@ STAGE PLANS:
                   alias: t1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Citation')) and subject is not null) (type: boolean)
+                    predicate: ((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Citation') and subject is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: subject (type: string)
@@ -176,7 +176,7 @@ STAGE PLANS:
                   alias: t1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_from') and object is not null) and subject is not null) (type: boolean)
+                    predicate: ((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_from') and object is not null and subject is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: subject (type: string), object (type: string)
@@ -194,7 +194,7 @@ STAGE PLANS:
                   alias: t1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Author')) and subject is not null) (type: boolean)
+                    predicate: ((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Author') and subject is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: subject (type: string)
@@ -211,7 +211,7 @@ STAGE PLANS:
                   alias: t1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_to') and subject is not null) and object is not null) (type: boolean)
+                    predicate: ((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_to') and subject is not null and object is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: subject (type: string), object (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join4.q.out b/ql/src/test/results/clientpositive/spark/join4.q.out
index 55b3a18..f4ff9eb 100644
--- a/ql/src/test/results/clientpositive/spark/join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/join4.q.out
@@ -76,7 +76,7 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join5.q.out b/ql/src/test/results/clientpositive/spark/join5.q.out
index 66451ab..6e90fa6 100644
--- a/ql/src/test/results/clientpositive/spark/join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/join5.q.out
@@ -58,7 +58,7 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/join8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join8.q.out b/ql/src/test/results/clientpositive/spark/join8.q.out
index bcf98c6..270053c 100644
--- a/ql/src/test/results/clientpositive/spark/join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/join8.q.out
@@ -76,7 +76,7 @@ STAGE PLANS:
                   alias: src1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/join_reorder2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_reorder2.q.out b/ql/src/test/results/clientpositive/spark/join_reorder2.q.out
index a0ec508..a8c34c4 100644
--- a/ql/src/test/results/clientpositive/spark/join_reorder2.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_reorder2.q.out
@@ -225,7 +225,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (key + 1) is not null) (type: boolean)
+                    predicate: (key is not null and val is not null and (key + 1) is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/join_reorder3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_reorder3.q.out b/ql/src/test/results/clientpositive/spark/join_reorder3.q.out
index 745fae5..efd8a8c 100644
--- a/ql/src/test/results/clientpositive/spark/join_reorder3.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_reorder3.q.out
@@ -225,7 +225,7 @@ STAGE PLANS:
                   alias: a
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and val is not null) and (key + 1) is not null) (type: boolean)
+                    predicate: (key is not null and val is not null and (key + 1) is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
index 1817ff1..f9225d7 100644
--- a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out
@@ -1006,7 +1006,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -1078,7 +1078,7 @@ STAGE PLANS:
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
-                    predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+                    predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out b/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
index fb37e00..1866e37 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out
@@ -40,7 +40,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((key > '1') and (key < '400')) and (key > '2')) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+                    predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
                     Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -57,7 +57,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key > '2') and (key > '1')) and (key < '400')) and (key <> '4')) and (key > '20')) (type: boolean)
+                    predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -308,7 +308,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((key > '1') and (key < '400')) and (key > '2')) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+                    predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
                     Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -325,7 +325,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key > '2') and (key > '1')) and (key < '400')) and (key <> '4')) and (key > '20')) (type: boolean)
+                    predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/ppd_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join.q.out b/ql/src/test/results/clientpositive/spark/ppd_join.q.out
index 98affb5..aed4800 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((key > '1') and (key < '400')) and (key > '2')) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+                    predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
                     Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -54,7 +54,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key > '2') and (key > '1')) and (key < '400')) and (key <> '4')) and (key > '20')) (type: boolean)
+                    predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -570,7 +570,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((key > '1') and (key < '400')) and (key > '2')) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+                    predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
                     Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -587,7 +587,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key > '2') and (key > '1')) and (key < '400')) and (key <> '4')) and (key > '20')) (type: boolean)
+                    predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
                     Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
index 386a876..61382da 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
@@ -44,7 +44,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key <> '306') and (sqrt(key) <> 13.0)) and value is not null) (type: boolean)
+                    predicate: ((key <> '306') and (sqrt(key) <> 13.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: value (type: string)
@@ -61,7 +61,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((((key <> '302') and (key < '400')) and (key <> '305')) and (key <> '311')) and ((value <> 'val_50') or (key > '1'))) and (key <> '14')) and value is not null) (type: boolean)
+                    predicate: ((key <> '302') and (key < '400') and (key <> '305') and (key <> '311') and ((value <> 'val_50') or (key > '1')) and (key <> '14') and value is not null) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -79,7 +79,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key <> '305') and (key <> '302')) and (key < '400')) and (key <> '14')) and (key <> '311')) (type: boolean)
+                    predicate: ((key <> '305') and (key <> '302') and (key < '400') and (key <> '14') and (key <> '311')) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -1728,7 +1728,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key <> '306') and (sqrt(key) <> 13.0)) and value is not null) (type: boolean)
+                    predicate: ((key <> '306') and (sqrt(key) <> 13.0) and value is not null) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: value (type: string)
@@ -1745,7 +1745,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((((key <> '302') and (key < '400')) and (key <> '305')) and (key <> '311')) and ((value <> 'val_50') or (key > '1'))) and (key <> '14')) and value is not null) (type: boolean)
+                    predicate: ((key <> '302') and (key < '400') and (key <> '305') and (key <> '311') and ((value <> 'val_50') or (key > '1')) and (key <> '14') and value is not null) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -1763,7 +1763,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((key <> '305') and (key <> '302')) and (key < '400')) and (key <> '14')) and (key <> '311')) (type: boolean)
+                    predicate: ((key <> '305') and (key <> '302') and (key < '400') and (key <> '14') and (key <> '311')) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join3.q.out b/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
index 2dff7ac..fc60d8b 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
@@ -44,7 +44,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((((key <> '13') and (key <> '11')) and (key < '400')) and (key <> '12')) and (key <> '1')) and (key > '0')) and (key <> '4')) (type: boolean)
+                    predicate: ((key <> '13') and (key <> '11') and (key < '400') and (key <> '12') and (key <> '1') and (key > '0') and (key <> '4')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -61,7 +61,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((key <> '11') and (key < '400')) and (key <> '12')) and (key <> '13')) and (key > '0')) and ((value <> 'val_500') or (key > '1'))) and (key <> '4')) and (key <> '1')) (type: boolean)
+                    predicate: ((key <> '11') and (key < '400') and (key <> '12') and (key <> '13') and (key > '0') and ((value <> 'val_500') or (key > '1')) and (key <> '4') and (key <> '1')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -78,7 +78,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((((key <> '12') and (key <> '11')) and (key < '400')) and (key <> '13')) and (key <> '4')) and (key > '0')) and (key <> '1')) (type: boolean)
+                    predicate: ((key <> '12') and (key <> '11') and (key < '400') and (key <> '13') and (key <> '4') and (key > '0') and (key <> '1')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
@@ -1784,7 +1784,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((((key <> '13') and (key <> '11')) and (key < '400')) and (key <> '12')) and (key <> '1')) and (key > '0')) and (key <> '4')) (type: boolean)
+                    predicate: ((key <> '13') and (key <> '11') and (key < '400') and (key <> '12') and (key <> '1') and (key > '0') and (key <> '4')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -1801,7 +1801,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((((((key <> '11') and (key < '400')) and (key <> '12')) and (key <> '13')) and (key > '0')) and ((value <> 'val_500') or (key > '1'))) and (key <> '4')) and (key <> '1')) (type: boolean)
+                    predicate: ((key <> '11') and (key < '400') and (key <> '12') and (key <> '13') and (key > '0') and ((value <> 'val_500') or (key > '1')) and (key <> '4') and (key <> '1')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
@@ -1818,7 +1818,7 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((((key <> '12') and (key <> '11')) and (key < '400')) and (key <> '13')) and (key <> '4')) and (key > '0')) and (key <> '1')) (type: boolean)
+                    predicate: ((key <> '12') and (key <> '11') and (key < '400') and (key <> '13') and (key <> '4') and (key > '0') and (key <> '1')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)


[07/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
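
All of the hunks in this message show the same rewrite from HIVE-12968: a left-deep chain of binary ANDs such as ((a and b) and c) in a Filter Operator predicate is merged into a single multi-AND (a and b and c). In most hunks only the predicate line changes; a few golden files also pick up refreshed size estimates from regeneration. The sketch below illustrates that flattening in isolation. It is a minimal, hypothetical example, not the Hive implementation: the Pred class, its factory methods, and the sample predicate strings are all made up for illustration.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    /** Toy predicate model: a node is either a leaf expression or an AND over children. */
    final class Pred {
      final String leaf;          // non-null for leaf expressions
      final List<Pred> children;  // non-null for AND nodes

      private Pred(String leaf, List<Pred> children) {
        this.leaf = leaf;
        this.children = children;
      }

      static Pred leaf(String text) { return new Pred(text, null); }
      static Pred and(Pred... kids) { return new Pred(null, Arrays.asList(kids)); }

      boolean isAnd() { return children != null; }

      /** Flatten nested ANDs into one multi-AND, preserving child order. */
      static Pred flattenAnd(Pred p) {
        if (!p.isAnd()) {
          return p;
        }
        List<Pred> flat = new ArrayList<>();
        for (Pred child : p.children) {
          Pred c = flattenAnd(child);
          if (c.isAnd()) {
            flat.addAll(c.children);   // splice grandchildren into this AND
          } else {
            flat.add(c);
          }
        }
        return new Pred(null, flat);
      }

      @Override
      public String toString() {
        if (!isAnd()) {
          return leaf;
        }
        StringBuilder sb = new StringBuilder("(");
        for (int i = 0; i < children.size(); i++) {
          if (i > 0) {
            sb.append(" and ");
          }
          sb.append(children.get(i));
        }
        return sb.append(")").toString();
      }

      public static void main(String[] args) {
        // ((key > '1' and key < '400') and key <> '4')
        //   becomes (key > '1' and key < '400' and key <> '4')
        Pred nested = and(and(leaf("key > '1'"), leaf("key < '400'")), leaf("key <> '4'"));
        System.out.println(flattenAnd(nested));
      }
    }

Run as a single file, this prints the flattened form, which is the shape the updated predicate lines in these golden files now take.
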
http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query80.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query80.q.out b/ql/src/test/results/clientpositive/perf/query80.q.out
index e1bbb33..014a621 100644
--- a/ql/src/test/results/clientpositive/perf/query80.q.out
+++ b/ql/src/test/results/clientpositive/perf/query80.q.out
@@ -123,7 +123,7 @@ Stage-0
                                                       Select Operator [SEL_41] (rows=1 width=0)
                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
                                                         Filter Operator [FIL_192] (rows=1 width=0)
-                                                          predicate:(((cs_sold_date_sk is not null and cs_catalog_page_sk is not null) and cs_item_sk is not null) and cs_promo_sk is not null)
+                                                          predicate:(cs_sold_date_sk is not null and cs_catalog_page_sk is not null and cs_item_sk is not null and cs_promo_sk is not null)
                                                           TableScan [TS_39] (rows=1 width=0)
                                                             default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_catalog_page_sk","cs_item_sk","cs_promo_sk","cs_order_number","cs_ext_sales_price","cs_net_profit"]
                                                   <-Map 23 [SIMPLE_EDGE]
@@ -215,7 +215,7 @@ Stage-0
                                                       Select Operator [SEL_82] (rows=1 width=0)
                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
                                                         Filter Operator [FIL_198] (rows=1 width=0)
-                                                          predicate:(((ws_sold_date_sk is not null and ws_web_site_sk is not null) and ws_item_sk is not null) and ws_promo_sk is not null)
+                                                          predicate:(ws_sold_date_sk is not null and ws_web_site_sk is not null and ws_item_sk is not null and ws_promo_sk is not null)
                                                           TableScan [TS_80] (rows=1 width=0)
                                                             default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_web_site_sk","ws_promo_sk","ws_order_number","ws_ext_sales_price","ws_net_profit"]
                                                   <-Map 35 [SIMPLE_EDGE]
@@ -307,7 +307,7 @@ Stage-0
                                                       Select Operator [SEL_2] (rows=1 width=0)
                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
                                                         Filter Operator [FIL_186] (rows=1 width=0)
-                                                          predicate:(((ss_sold_date_sk is not null and ss_store_sk is not null) and ss_item_sk is not null) and ss_promo_sk is not null)
+                                                          predicate:(ss_sold_date_sk is not null and ss_store_sk is not null and ss_item_sk is not null and ss_promo_sk is not null)
                                                           TableScan [TS_0] (rows=1 width=0)
                                                             default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_promo_sk","ss_ticket_number","ss_ext_sales_price","ss_net_profit"]
                                                   <-Map 11 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query82.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query82.q.out b/ql/src/test/results/clientpositive/perf/query82.q.out
index 57a50c7..66d5a8f 100644
--- a/ql/src/test/results/clientpositive/perf/query82.q.out
+++ b/ql/src/test/results/clientpositive/perf/query82.q.out
@@ -51,7 +51,7 @@ Stage-0
                             Select Operator [SEL_2] (rows=115500 width=1436)
                               Output:["_col0","_col1","_col2","_col3"]
                               Filter Operator [FIL_38] (rows=115500 width=1436)
-                                predicate:(((i_manufact_id) IN (437, 129, 727, 663) and i_current_price BETWEEN 30 AND 60) and i_item_sk is not null)
+                                predicate:((i_manufact_id) IN (437, 129, 727, 663) and i_current_price BETWEEN 30 AND 60 and i_item_sk is not null)
                                 TableScan [TS_0] (rows=462000 width=1436)
                                   default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc","i_current_price","i_manufact_id"]
                         <-Map 6 [SIMPLE_EDGE]
@@ -60,7 +60,7 @@ Stage-0
                             Select Operator [SEL_5] (rows=1 width=0)
                               Output:["_col0","_col1"]
                               Filter Operator [FIL_39] (rows=1 width=0)
-                                predicate:((inv_quantity_on_hand BETWEEN 100 AND 500 and inv_item_sk is not null) and inv_date_sk is not null)
+                                predicate:(inv_quantity_on_hand BETWEEN 100 AND 500 and inv_item_sk is not null and inv_date_sk is not null)
                                 TableScan [TS_3] (rows=1 width=0)
                                   default@inventory,inventory,Tbl:PARTIAL,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_quantity_on_hand"]
                         <-Map 8 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query84.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query84.q.out b/ql/src/test/results/clientpositive/perf/query84.q.out
index dfd5460..a2e25bb 100644
--- a/ql/src/test/results/clientpositive/perf/query84.q.out
+++ b/ql/src/test/results/clientpositive/perf/query84.q.out
@@ -33,7 +33,7 @@ Stage-0
                     Select Operator [SEL_14] (rows=2 width=12)
                       Output:["_col0"]
                       Filter Operator [FIL_61] (rows=2 width=12)
-                        predicate:(((ib_lower_bound >= 32287) and (ib_upper_bound <= 82287)) and ib_income_band_sk is not null)
+                        predicate:((ib_lower_bound >= 32287) and (ib_upper_bound <= 82287) and ib_income_band_sk is not null)
                         TableScan [TS_12] (rows=20 width=12)
                           default@income_band,income_band,Tbl:COMPLETE,Col:NONE,Output:["ib_income_band_sk","ib_lower_bound","ib_upper_bound"]
                 <-Reducer 4 [SIMPLE_EDGE]
@@ -84,7 +84,7 @@ Stage-0
                                 Select Operator [SEL_2] (rows=80000000 width=860)
                                   Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                   Filter Operator [FIL_57] (rows=80000000 width=860)
-                                    predicate:((c_current_addr_sk is not null and c_current_cdemo_sk is not null) and c_current_hdemo_sk is not null)
+                                    predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_current_hdemo_sk is not null)
                                     TableScan [TS_0] (rows=80000000 width=860)
                                       default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_id","c_current_cdemo_sk","c_current_hdemo_sk","c_current_addr_sk","c_first_name","c_last_name"]
                             <-Map 7 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query85.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query85.q.out b/ql/src/test/results/clientpositive/perf/query85.q.out
index 93b5f4e..cff7eaf 100644
--- a/ql/src/test/results/clientpositive/perf/query85.q.out
+++ b/ql/src/test/results/clientpositive/perf/query85.q.out
@@ -78,7 +78,7 @@ Stage-0
                                         Select Operator [SEL_28] (rows=10000000 width=1014)
                                           Output:["_col0","_col1"]
                                           Filter Operator [FIL_98] (rows=10000000 width=1014)
-                                            predicate:(((ca_state) IN ('KY', 'GA', 'NM', 'MT', 'OR', 'IN', 'WI', 'MO', 'WV') and (ca_country = 'United States')) and ca_address_sk is not null)
+                                            predicate:((ca_state) IN ('KY', 'GA', 'NM', 'MT', 'OR', 'IN', 'WI', 'MO', 'WV') and (ca_country = 'United States') and ca_address_sk is not null)
                                             TableScan [TS_26] (rows=40000000 width=1014)
                                               default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state","ca_country"]
                                     <-Reducer 5 [SIMPLE_EDGE]
@@ -92,7 +92,7 @@ Stage-0
                                             Select Operator [SEL_25] (rows=19800 width=362)
                                               Output:["_col0","_col1","_col2"]
                                               Filter Operator [FIL_97] (rows=19800 width=362)
-                                                predicate:((((((cd_education_status = '4 yr Degree') or (cd_education_status = 'Primary') or (cd_education_status = 'Advanced Degree')) and ((cd_marital_status = 'M') or (cd_marital_status = 'D') or (cd_marital_status = 'U'))) and cd_demo_sk is not null) and cd_marital_status is not null) and cd_education_status is not null)
+                                                predicate:(((cd_education_status = '4 yr Degree') or (cd_education_status = 'Primary') or (cd_education_status = 'Advanced Degree')) and ((cd_marital_status = 'M') or (cd_marital_status = 'D') or (cd_marital_status = 'U')) and cd_demo_sk is not null and cd_marital_status is not null and cd_education_status is not null)
                                                 TableScan [TS_23] (rows=19800 width=362)
                                                   default@customer_demographics,cd1,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status","cd_education_status"]
                                         <-Reducer 4 [SIMPLE_EDGE]
@@ -110,7 +110,7 @@ Stage-0
                                                     Select Operator [SEL_11] (rows=19800 width=362)
                                                       Output:["_col0","_col1","_col2"]
                                                       Filter Operator [FIL_96] (rows=19800 width=362)
-                                                        predicate:((((((cd_education_status = '4 yr Degree') or (cd_education_status = 'Primary') or (cd_education_status = 'Advanced Degree')) and ((cd_marital_status = 'M') or (cd_marital_status = 'D') or (cd_marital_status = 'U'))) and cd_demo_sk is not null) and cd_education_status is not null) and cd_marital_status is not null)
+                                                        predicate:(((cd_education_status = '4 yr Degree') or (cd_education_status = 'Primary') or (cd_education_status = 'Advanced Degree')) and ((cd_marital_status = 'M') or (cd_marital_status = 'D') or (cd_marital_status = 'U')) and cd_demo_sk is not null and cd_education_status is not null and cd_marital_status is not null)
                                                         TableScan [TS_9] (rows=19800 width=362)
                                                           default@customer_demographics,cd1,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status","cd_education_status"]
                                                 <-Reducer 3 [SIMPLE_EDGE]
@@ -138,7 +138,7 @@ Stage-0
                                                             Select Operator [SEL_2] (rows=1 width=0)
                                                               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
                                                               Filter Operator [FIL_93] (rows=1 width=0)
-                                                                predicate:((((((ws_sales_price BETWEEN 100.0 AND 150.0 or ws_sales_price BETWEEN 50.0 AND 100.0 or ws_sales_price BETWEEN 150.0 AND 200.0) and (ws_net_profit BETWEEN 100 AND 200 or ws_net_profit BETWEEN 150 AND 300 or ws_net_profit BETWEEN 50 AND 250)) and ws_order_number is not null) and ws_item_sk is not null) and ws_web_page_sk is not null) and ws_sold_date_sk is not null)
+                                                                predicate:((ws_sales_price BETWEEN 100.0 AND 150.0 or ws_sales_price BETWEEN 50.0 AND 100.0 or ws_sales_price BETWEEN 150.0 AND 200.0) and (ws_net_profit BETWEEN 100 AND 200 or ws_net_profit BETWEEN 150 AND 300 or ws_net_profit BETWEEN 50 AND 250) and ws_order_number is not null and ws_item_sk is not null and ws_web_page_sk is not null and ws_sold_date_sk is not null)
                                                                 TableScan [TS_0] (rows=1 width=0)
                                                                   default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_web_page_sk","ws_order_number","ws_quantity","ws_sales_price","ws_net_profit"]
                                                         <-Map 11 [SIMPLE_EDGE]
@@ -147,7 +147,7 @@ Stage-0
                                                             Select Operator [SEL_5] (rows=1 width=0)
                                                               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                                                               Filter Operator [FIL_94] (rows=1 width=0)
-                                                                predicate:(((((wr_item_sk is not null and wr_order_number is not null) and wr_refunded_cdemo_sk is not null) and wr_returning_cdemo_sk is not null) and wr_refunded_addr_sk is not null) and wr_reason_sk is not null)
+                                                                predicate:(wr_item_sk is not null and wr_order_number is not null and wr_refunded_cdemo_sk is not null and wr_returning_cdemo_sk is not null and wr_refunded_addr_sk is not null and wr_reason_sk is not null)
                                                                 TableScan [TS_3] (rows=1 width=0)
                                                                   default@web_returns,web_returns,Tbl:PARTIAL,Col:NONE,Output:["wr_item_sk","wr_refunded_cdemo_sk","wr_refunded_addr_sk","wr_returning_cdemo_sk","wr_reason_sk","wr_order_number","wr_fee","wr_refunded_cash"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query88.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query88.q.out b/ql/src/test/results/clientpositive/perf/query88.q.out
index 11f907f..ed2c961 100644
--- a/ql/src/test/results/clientpositive/perf/query88.q.out
+++ b/ql/src/test/results/clientpositive/perf/query88.q.out
@@ -258,7 +258,7 @@ Stage-0
                           Select Operator [SEL_34] (rows=14400 width=471)
                             Output:["_col0"]
                             Filter Operator [FIL_304] (rows=14400 width=471)
-                              predicate:(((t_hour = 9) and (t_minute < 30)) and t_time_sk is not null)
+                              predicate:((t_hour = 9) and (t_minute < 30) and t_time_sk is not null)
                               TableScan [TS_32] (rows=86400 width=471)
                                 default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                       <-Reducer 11 [SIMPLE_EDGE]
@@ -272,7 +272,7 @@ Stage-0
                               Select Operator [SEL_28] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_302] (rows=1 width=0)
-                                  predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                  predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                   TableScan [TS_26] (rows=1 width=0)
                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                           <-Map 15 [SIMPLE_EDGE]
@@ -314,7 +314,7 @@ Stage-0
                           Select Operator [SEL_60] (rows=14400 width=471)
                             Output:["_col0"]
                             Filter Operator [FIL_308] (rows=14400 width=471)
-                              predicate:(((t_hour = 9) and (t_minute >= 30)) and t_time_sk is not null)
+                              predicate:((t_hour = 9) and (t_minute >= 30) and t_time_sk is not null)
                               TableScan [TS_58] (rows=86400 width=471)
                                 default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                       <-Reducer 19 [SIMPLE_EDGE]
@@ -328,7 +328,7 @@ Stage-0
                               Select Operator [SEL_54] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_306] (rows=1 width=0)
-                                  predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                  predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                   TableScan [TS_52] (rows=1 width=0)
                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                           <-Map 23 [SIMPLE_EDGE]
@@ -370,7 +370,7 @@ Stage-0
                           Select Operator [SEL_86] (rows=14400 width=471)
                             Output:["_col0"]
                             Filter Operator [FIL_312] (rows=14400 width=471)
-                              predicate:(((t_hour = 10) and (t_minute < 30)) and t_time_sk is not null)
+                              predicate:((t_hour = 10) and (t_minute < 30) and t_time_sk is not null)
                               TableScan [TS_84] (rows=86400 width=471)
                                 default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                       <-Reducer 27 [SIMPLE_EDGE]
@@ -384,7 +384,7 @@ Stage-0
                               Select Operator [SEL_80] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_310] (rows=1 width=0)
-                                  predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                  predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                   TableScan [TS_78] (rows=1 width=0)
                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                           <-Map 31 [SIMPLE_EDGE]
@@ -426,7 +426,7 @@ Stage-0
                           Select Operator [SEL_112] (rows=14400 width=471)
                             Output:["_col0"]
                             Filter Operator [FIL_316] (rows=14400 width=471)
-                              predicate:(((t_hour = 10) and (t_minute >= 30)) and t_time_sk is not null)
+                              predicate:((t_hour = 10) and (t_minute >= 30) and t_time_sk is not null)
                               TableScan [TS_110] (rows=86400 width=471)
                                 default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                       <-Reducer 35 [SIMPLE_EDGE]
@@ -440,7 +440,7 @@ Stage-0
                               Select Operator [SEL_106] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_314] (rows=1 width=0)
-                                  predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                  predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                   TableScan [TS_104] (rows=1 width=0)
                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                           <-Map 39 [SIMPLE_EDGE]
@@ -482,7 +482,7 @@ Stage-0
                           Select Operator [SEL_138] (rows=14400 width=471)
                             Output:["_col0"]
                             Filter Operator [FIL_320] (rows=14400 width=471)
-                              predicate:(((t_hour = 11) and (t_minute < 30)) and t_time_sk is not null)
+                              predicate:((t_hour = 11) and (t_minute < 30) and t_time_sk is not null)
                               TableScan [TS_136] (rows=86400 width=471)
                                 default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                       <-Reducer 43 [SIMPLE_EDGE]
@@ -496,7 +496,7 @@ Stage-0
                               Select Operator [SEL_132] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_318] (rows=1 width=0)
-                                  predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                  predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                   TableScan [TS_130] (rows=1 width=0)
                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                           <-Map 47 [SIMPLE_EDGE]
@@ -538,7 +538,7 @@ Stage-0
                           Select Operator [SEL_8] (rows=14400 width=471)
                             Output:["_col0"]
                             Filter Operator [FIL_300] (rows=14400 width=471)
-                              predicate:(((t_hour = 8) and (t_minute >= 30)) and t_time_sk is not null)
+                              predicate:((t_hour = 8) and (t_minute >= 30) and t_time_sk is not null)
                               TableScan [TS_6] (rows=86400 width=471)
                                 default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                       <-Reducer 2 [SIMPLE_EDGE]
@@ -552,7 +552,7 @@ Stage-0
                               Select Operator [SEL_2] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_298] (rows=1 width=0)
-                                  predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                  predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                   TableScan [TS_0] (rows=1 width=0)
                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                           <-Map 7 [SIMPLE_EDGE]
@@ -594,7 +594,7 @@ Stage-0
                           Select Operator [SEL_164] (rows=14400 width=471)
                             Output:["_col0"]
                             Filter Operator [FIL_324] (rows=14400 width=471)
-                              predicate:(((t_hour = 11) and (t_minute >= 30)) and t_time_sk is not null)
+                              predicate:((t_hour = 11) and (t_minute >= 30) and t_time_sk is not null)
                               TableScan [TS_162] (rows=86400 width=471)
                                 default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                       <-Reducer 51 [SIMPLE_EDGE]
@@ -608,7 +608,7 @@ Stage-0
                               Select Operator [SEL_158] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_322] (rows=1 width=0)
-                                  predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                  predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                   TableScan [TS_156] (rows=1 width=0)
                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                           <-Map 55 [SIMPLE_EDGE]
@@ -650,7 +650,7 @@ Stage-0
                           Select Operator [SEL_190] (rows=14400 width=471)
                             Output:["_col0"]
                             Filter Operator [FIL_328] (rows=14400 width=471)
-                              predicate:(((t_hour = 12) and (t_minute < 30)) and t_time_sk is not null)
+                              predicate:((t_hour = 12) and (t_minute < 30) and t_time_sk is not null)
                               TableScan [TS_188] (rows=86400 width=471)
                                 default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                       <-Reducer 59 [SIMPLE_EDGE]
@@ -664,7 +664,7 @@ Stage-0
                               Select Operator [SEL_184] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_326] (rows=1 width=0)
-                                  predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                  predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                   TableScan [TS_182] (rows=1 width=0)
                                     default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                           <-Map 63 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query89.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query89.q.out b/ql/src/test/results/clientpositive/perf/query89.q.out
index 0cda449..165d829 100644
--- a/ql/src/test/results/clientpositive/perf/query89.q.out
+++ b/ql/src/test/results/clientpositive/perf/query89.q.out
@@ -134,7 +134,7 @@ Stage-0
                                                 Select Operator [SEL_2] (rows=231000 width=1436)
                                                   Output:["_col0","_col1","_col2","_col3"]
                                                   Filter Operator [FIL_47] (rows=231000 width=1436)
-                                                    predicate:(((((i_category) IN ('Home', 'Books', 'Electronics') or (i_category) IN ('Shoes', 'Jewelry', 'Men')) and ((i_class) IN ('wallpaper', 'parenting', 'musical') or (i_class) IN ('womens', 'birdal', 'pants'))) and (((i_category) IN ('Home', 'Books', 'Electronics') and (i_class) IN ('wallpaper', 'parenting', 'musical')) or ((i_category) IN ('Shoes', 'Jewelry', 'Men') and (i_class) IN ('womens', 'birdal', 'pants')))) and i_item_sk is not null)
+                                                    predicate:(((i_category) IN ('Home', 'Books', 'Electronics') or (i_category) IN ('Shoes', 'Jewelry', 'Men')) and ((i_class) IN ('wallpaper', 'parenting', 'musical') or (i_class) IN ('womens', 'birdal', 'pants')) and (((i_category) IN ('Home', 'Books', 'Electronics') and (i_class) IN ('wallpaper', 'parenting', 'musical')) or ((i_category) IN ('Shoes', 'Jewelry', 'Men') and (i_class) IN ('womens', 'birdal', 'pants'))) and i_item_sk is not null)
                                                     TableScan [TS_0] (rows=462000 width=1436)
                                                       default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_brand","i_class","i_category"]
                                             <-Map 8 [SIMPLE_EDGE]
@@ -143,7 +143,7 @@ Stage-0
                                                 Select Operator [SEL_5] (rows=1 width=0)
                                                   Output:["_col0","_col1","_col2","_col3"]
                                                   Filter Operator [FIL_48] (rows=1 width=0)
-                                                    predicate:((ss_item_sk is not null and ss_sold_date_sk is not null) and ss_store_sk is not null)
+                                                    predicate:(ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
                                                     TableScan [TS_3] (rows=1 width=0)
                                                       default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_sales_price"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query90.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query90.q.out b/ql/src/test/results/clientpositive/perf/query90.q.out
index ccb61a4..62c8df6 100644
--- a/ql/src/test/results/clientpositive/perf/query90.q.out
+++ b/ql/src/test/results/clientpositive/perf/query90.q.out
@@ -77,7 +77,7 @@ Stage-0
                                       Select Operator [SEL_28] (rows=1 width=0)
                                         Output:["_col0","_col1","_col2"]
                                         Filter Operator [FIL_83] (rows=1 width=0)
-                                          predicate:((ws_ship_hdemo_sk is not null and ws_sold_time_sk is not null) and ws_web_page_sk is not null)
+                                          predicate:(ws_ship_hdemo_sk is not null and ws_sold_time_sk is not null and ws_web_page_sk is not null)
                                           TableScan [TS_26] (rows=1 width=0)
                                             default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_time_sk","ws_ship_hdemo_sk","ws_web_page_sk"]
                                   <-Map 16 [SIMPLE_EDGE]
@@ -133,7 +133,7 @@ Stage-0
                                       Select Operator [SEL_2] (rows=1 width=0)
                                         Output:["_col0","_col1","_col2"]
                                         Filter Operator [FIL_79] (rows=1 width=0)
-                                          predicate:((ws_ship_hdemo_sk is not null and ws_sold_time_sk is not null) and ws_web_page_sk is not null)
+                                          predicate:(ws_ship_hdemo_sk is not null and ws_sold_time_sk is not null and ws_web_page_sk is not null)
                                           TableScan [TS_0] (rows=1 width=0)
                                             default@web_sales,web_sales,Tbl:PARTIAL,Col:NONE,Output:["ws_sold_time_sk","ws_ship_hdemo_sk","ws_web_page_sk"]
                                   <-Map 8 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query91.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query91.q.out b/ql/src/test/results/clientpositive/perf/query91.q.out
index 66d8056..4725646 100644
--- a/ql/src/test/results/clientpositive/perf/query91.q.out
+++ b/ql/src/test/results/clientpositive/perf/query91.q.out
@@ -57,7 +57,7 @@ Stage-0
                               Select Operator [SEL_17] (rows=9900 width=362)
                                 Output:["_col0","_col1","_col2"]
                                 Filter Operator [FIL_79] (rows=9900 width=362)
-                                  predicate:(((((cd_marital_status = 'M') or (cd_marital_status = 'W')) and ((cd_education_status = 'Unknown') or (cd_education_status = 'Advanced Degree'))) and (((cd_marital_status = 'M') and (cd_education_status = 'Unknown')) or ((cd_marital_status = 'W') and (cd_education_status = 'Advanced Degree')))) and cd_demo_sk is not null)
+                                  predicate:(((cd_marital_status = 'M') or (cd_marital_status = 'W')) and ((cd_education_status = 'Unknown') or (cd_education_status = 'Advanced Degree')) and (((cd_marital_status = 'M') and (cd_education_status = 'Unknown')) or ((cd_marital_status = 'W') and (cd_education_status = 'Advanced Degree'))) and cd_demo_sk is not null)
                                   TableScan [TS_15] (rows=19800 width=362)
                                     default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status","cd_education_status"]
                           <-Reducer 5 [SIMPLE_EDGE]
@@ -85,7 +85,7 @@ Stage-0
                                       Select Operator [SEL_11] (rows=80000000 width=860)
                                         Output:["_col0","_col1","_col2","_col3"]
                                         Filter Operator [FIL_77] (rows=80000000 width=860)
-                                          predicate:(((c_customer_sk is not null and c_current_addr_sk is not null) and c_current_cdemo_sk is not null) and c_current_hdemo_sk is not null)
+                                          predicate:(c_customer_sk is not null and c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_current_hdemo_sk is not null)
                                           TableScan [TS_9] (rows=80000000 width=860)
                                             default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_hdemo_sk","c_current_addr_sk"]
                                   <-Reducer 3 [SIMPLE_EDGE]
@@ -99,7 +99,7 @@ Stage-0
                                           Select Operator [SEL_8] (rows=18262 width=1119)
                                             Output:["_col0"]
                                             Filter Operator [FIL_76] (rows=18262 width=1119)
-                                              predicate:(((d_year = 1999) and (d_moy = 11)) and d_date_sk is not null)
+                                              predicate:((d_year = 1999) and (d_moy = 11) and d_date_sk is not null)
                                               TableScan [TS_6] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                       <-Reducer 2 [SIMPLE_EDGE]
@@ -122,7 +122,7 @@ Stage-0
                                               Select Operator [SEL_5] (rows=1 width=0)
                                                 Output:["_col0","_col1","_col2","_col3"]
                                                 Filter Operator [FIL_75] (rows=1 width=0)
-                                                  predicate:((cr_call_center_sk is not null and cr_returned_date_sk is not null) and cr_returning_customer_sk is not null)
+                                                  predicate:(cr_call_center_sk is not null and cr_returned_date_sk is not null and cr_returning_customer_sk is not null)
                                                   TableScan [TS_3] (rows=1 width=0)
                                                     default@catalog_returns,catalog_returns,Tbl:PARTIAL,Col:NONE,Output:["cr_returned_date_sk","cr_returning_customer_sk","cr_call_center_sk","cr_net_loss"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query92.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query92.q.out b/ql/src/test/results/clientpositive/perf/query92.q.out
index 9327f5a..3a1d03d 100644
--- a/ql/src/test/results/clientpositive/perf/query92.q.out
+++ b/ql/src/test/results/clientpositive/perf/query92.q.out
@@ -57,7 +57,7 @@ Stage-0
                                 Select Operator [SEL_5] (rows=8116 width=1119)
                                   Output:["_col0"]
                                   Filter Operator [FIL_43] (rows=8116 width=1119)
-                                    predicate:(((d_month_seq >= 1206) and (d_month_seq <= 1217)) and d_date_sk is not null)
+                                    predicate:((d_month_seq >= 1206) and (d_month_seq <= 1217) and d_date_sk is not null)
                                     TableScan [TS_3] (rows=73049 width=1119)
                                       default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_month_seq"]
                 <-Reducer 9 [SIMPLE_EDGE]
@@ -78,7 +78,7 @@ Stage-0
                               Select Operator [SEL_19] (rows=8116 width=1119)
                                 Output:["_col0"]
                                 Filter Operator [FIL_45] (rows=8116 width=1119)
-                                  predicate:(((d_month_seq >= 1206) and (d_month_seq <= 1217)) and d_date_sk is not null)
+                                  predicate:((d_month_seq >= 1206) and (d_month_seq <= 1217) and d_date_sk is not null)
                                   TableScan [TS_17] (rows=73049 width=1119)
                                     default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_month_seq"]
                           <-Map 7 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query93.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query93.q.out b/ql/src/test/results/clientpositive/perf/query93.q.out
index b273468..87d6c26 100644
--- a/ql/src/test/results/clientpositive/perf/query93.q.out
+++ b/ql/src/test/results/clientpositive/perf/query93.q.out
@@ -62,7 +62,7 @@ Stage-0
                               Select Operator [SEL_5] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2","_col3"]
                                 Filter Operator [FIL_31] (rows=1 width=0)
-                                  predicate:((sr_reason_sk is not null and sr_item_sk is not null) and sr_ticket_number is not null)
+                                  predicate:(sr_reason_sk is not null and sr_item_sk is not null and sr_ticket_number is not null)
                                   TableScan [TS_3] (rows=1 width=0)
                                     default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_item_sk","sr_reason_sk","sr_ticket_number","sr_return_quantity"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query94.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query94.q.out b/ql/src/test/results/clientpositive/perf/query94.q.out
index 76c12cd..8ac8cc2 100644
--- a/ql/src/test/results/clientpositive/perf/query94.q.out
+++ b/ql/src/test/results/clientpositive/perf/query94.q.out
@@ -95,7 +95,7 @@ Stage-0
                                         Select Operator [SEL_2] (rows=1 width=0)
                                           Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                           Filter Operator [FIL_72] (rows=1 width=0)
-                                            predicate:(((ws_ship_addr_sk is not null and ws_web_site_sk is not null) and ws_ship_date_sk is not null) and ws_order_number is not null)
+                                            predicate:(ws_ship_addr_sk is not null and ws_web_site_sk is not null and ws_ship_date_sk is not null and ws_order_number is not null)
                                             TableScan [TS_0] (rows=1 width=0)
                                               default@web_sales,ws1,Tbl:PARTIAL,Col:NONE,Output:["ws_ship_date_sk","ws_ship_addr_sk","ws_web_site_sk","ws_order_number","ws_ext_ship_cost","ws_net_profit"]
                                     <-Reducer 9 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query95.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query95.q.out b/ql/src/test/results/clientpositive/perf/query95.q.out
index 6447b37..534e910 100644
--- a/ql/src/test/results/clientpositive/perf/query95.q.out
+++ b/ql/src/test/results/clientpositive/perf/query95.q.out
@@ -76,7 +76,7 @@ Stage-0
                               Select Operator [SEL_2] (rows=1 width=0)
                                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                 Filter Operator [FIL_107] (rows=1 width=0)
-                                  predicate:(((ws_ship_addr_sk is not null and ws_web_site_sk is not null) and ws_ship_date_sk is not null) and ws_order_number is not null)
+                                  predicate:(ws_ship_addr_sk is not null and ws_web_site_sk is not null and ws_ship_date_sk is not null and ws_order_number is not null)
                                   TableScan [TS_0] (rows=1 width=0)
                                     default@web_sales,ws1,Tbl:PARTIAL,Col:NONE,Output:["ws_ship_date_sk","ws_ship_addr_sk","ws_web_site_sk","ws_order_number","ws_ext_ship_cost","ws_net_profit"]
                           <-Reducer 11 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query96.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query96.q.out b/ql/src/test/results/clientpositive/perf/query96.q.out
index 34e4bf9..b3a9652 100644
--- a/ql/src/test/results/clientpositive/perf/query96.q.out
+++ b/ql/src/test/results/clientpositive/perf/query96.q.out
@@ -51,7 +51,7 @@ Stage-0
                             Select Operator [SEL_8] (rows=14400 width=471)
                               Output:["_col0"]
                               Filter Operator [FIL_41] (rows=14400 width=471)
-                                predicate:(((t_hour = 8) and (t_minute >= 30)) and t_time_sk is not null)
+                                predicate:((t_hour = 8) and (t_minute >= 30) and t_time_sk is not null)
                                 TableScan [TS_6] (rows=86400 width=471)
                                   default@time_dim,time_dim,Tbl:COMPLETE,Col:NONE,Output:["t_time_sk","t_hour","t_minute"]
                         <-Reducer 2 [SIMPLE_EDGE]
@@ -65,7 +65,7 @@ Stage-0
                                 Select Operator [SEL_2] (rows=1 width=0)
                                   Output:["_col0","_col1","_col2"]
                                   Filter Operator [FIL_39] (rows=1 width=0)
-                                    predicate:((ss_hdemo_sk is not null and ss_sold_time_sk is not null) and ss_store_sk is not null)
+                                    predicate:(ss_hdemo_sk is not null and ss_sold_time_sk is not null and ss_store_sk is not null)
                                     TableScan [TS_0] (rows=1 width=0)
                                       default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_time_sk","ss_hdemo_sk","ss_store_sk"]
                             <-Map 7 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_gby_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_gby_join.q.out b/ql/src/test/results/clientpositive/ppd_gby_join.q.out
index 721ca13..1894b04 100644
--- a/ql/src/test/results/clientpositive/ppd_gby_join.q.out
+++ b/ql/src/test/results/clientpositive/ppd_gby_join.q.out
@@ -35,7 +35,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((key > '1') and (key < '400')) and (key > '2')) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+              predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
               Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -50,7 +50,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key > '2') and (key > '1')) and (key < '400')) and (key <> '4')) and (key > '20')) (type: boolean)
+              predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -305,7 +305,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((key > '1') and (key < '400')) and (key > '2')) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+              predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
               Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -320,7 +320,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key > '2') and (key > '1')) and (key < '400')) and (key <> '4')) and (key > '20')) (type: boolean)
+              predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join.q.out b/ql/src/test/results/clientpositive/ppd_join.q.out
index 6b82401..6081d48 100644
--- a/ql/src/test/results/clientpositive/ppd_join.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join.q.out
@@ -32,7 +32,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((key > '1') and (key < '400')) and (key > '2')) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+              predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
               Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -47,7 +47,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key > '2') and (key > '1')) and (key < '400')) and (key <> '4')) and (key > '20')) (type: boolean)
+              predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -557,7 +557,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((key > '1') and (key < '400')) and (key > '2')) and (key > '20')) and ((value < 'val_50') or (key > '2'))) and (key <> '4')) (type: boolean)
+              predicate: ((key > '1') and (key < '400') and (key > '2') and (key > '20') and ((value < 'val_50') or (key > '2')) and (key <> '4')) (type: boolean)
               Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -572,7 +572,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key > '2') and (key > '1')) and (key < '400')) and (key <> '4')) and (key > '20')) (type: boolean)
+              predicate: ((key > '2') and (key > '1') and (key < '400') and (key <> '4') and (key > '20')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join2.q.out b/ql/src/test/results/clientpositive/ppd_join2.q.out
index 9487678..729383a 100644
--- a/ql/src/test/results/clientpositive/ppd_join2.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join2.q.out
@@ -39,7 +39,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((key <> '302') and (key < '400')) and (key <> '305')) and (key <> '311')) and ((value <> 'val_50') or (key > '1'))) and (key <> '14')) and value is not null) (type: boolean)
+              predicate: ((key <> '302') and (key < '400') and (key <> '305') and (key <> '311') and ((value <> 'val_50') or (key > '1')) and (key <> '14') and value is not null) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -55,7 +55,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key <> '305') and (key <> '302')) and (key < '400')) and (key <> '14')) and (key <> '311')) (type: boolean)
+              predicate: ((key <> '305') and (key <> '302') and (key < '400') and (key <> '14') and (key <> '311')) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -97,7 +97,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key <> '306') and (sqrt(key) <> 13.0)) and value is not null) (type: boolean)
+              predicate: ((key <> '306') and (sqrt(key) <> 13.0) and value is not null) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: string)
@@ -1723,7 +1723,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((key <> '302') and (key < '400')) and (key <> '305')) and (key <> '311')) and ((value <> 'val_50') or (key > '1'))) and (key <> '14')) and value is not null) (type: boolean)
+              predicate: ((key <> '302') and (key < '400') and (key <> '305') and (key <> '311') and ((value <> 'val_50') or (key > '1')) and (key <> '14') and value is not null) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -1739,7 +1739,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key <> '305') and (key <> '302')) and (key < '400')) and (key <> '14')) and (key <> '311')) (type: boolean)
+              predicate: ((key <> '305') and (key <> '302') and (key < '400') and (key <> '14') and (key <> '311')) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -1781,7 +1781,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key <> '306') and (sqrt(key) <> 13.0)) and value is not null) (type: boolean)
+              predicate: ((key <> '306') and (sqrt(key) <> 13.0) and value is not null) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join3.q.out b/ql/src/test/results/clientpositive/ppd_join3.q.out
index c1586bc..d50bf49 100644
--- a/ql/src/test/results/clientpositive/ppd_join3.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join3.q.out
@@ -39,7 +39,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((key <> '11') and (key < '400')) and (key <> '12')) and (key <> '13')) and (key > '0')) and ((value <> 'val_500') or (key > '1'))) and (key <> '4')) and (key <> '1')) (type: boolean)
+              predicate: ((key <> '11') and (key < '400') and (key <> '12') and (key <> '13') and (key > '0') and ((value <> 'val_500') or (key > '1')) and (key <> '4') and (key <> '1')) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -54,7 +54,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((key <> '12') and (key <> '11')) and (key < '400')) and (key <> '13')) and (key <> '4')) and (key > '0')) and (key <> '1')) (type: boolean)
+              predicate: ((key <> '12') and (key <> '11') and (key < '400') and (key <> '13') and (key <> '4') and (key > '0') and (key <> '1')) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -96,7 +96,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((key <> '13') and (key <> '11')) and (key < '400')) and (key <> '12')) and (key <> '1')) and (key > '0')) and (key <> '4')) (type: boolean)
+              predicate: ((key <> '13') and (key <> '11') and (key < '400') and (key <> '12') and (key <> '1') and (key > '0') and (key <> '4')) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -1779,7 +1779,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((key <> '11') and (key < '400')) and (key <> '12')) and (key <> '13')) and (key > '0')) and ((value <> 'val_500') or (key > '1'))) and (key <> '4')) and (key <> '1')) (type: boolean)
+              predicate: ((key <> '11') and (key < '400') and (key <> '12') and (key <> '13') and (key > '0') and ((value <> 'val_500') or (key > '1')) and (key <> '4') and (key <> '1')) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -1794,7 +1794,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((key <> '12') and (key <> '11')) and (key < '400')) and (key <> '13')) and (key <> '4')) and (key > '0')) and (key <> '1')) (type: boolean)
+              predicate: ((key <> '12') and (key <> '11') and (key < '400') and (key <> '13') and (key <> '4') and (key > '0') and (key <> '1')) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -1836,7 +1836,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((((key <> '13') and (key <> '11')) and (key < '400')) and (key <> '12')) and (key <> '1')) and (key > '0')) and (key <> '4')) (type: boolean)
+              predicate: ((key <> '13') and (key <> '11') and (key < '400') and (key <> '12') and (key <> '1') and (key > '0') and (key <> '4')) (type: boolean)
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join4.q.out b/ql/src/test/results/clientpositive/ppd_join4.q.out
index cebb681..6ca7446 100644
--- a/ql/src/test/results/clientpositive/ppd_join4.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join4.q.out
@@ -57,7 +57,7 @@ STAGE PLANS:
             alias: test_tbl
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((id is not null and (name = 'c')) and (id = 'a')) (type: boolean)
+              predicate: ((name = 'c') and (id = 'a')) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join2.q.out b/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
index 1794578..24aa6ec 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join2.q.out
@@ -32,7 +32,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -48,7 +48,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) (type: boolean)
+              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -259,7 +259,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -275,7 +275,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) (type: boolean)
+              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join3.q.out b/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
index b6b5a1c..c339d2f 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join3.q.out
@@ -32,7 +32,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -48,7 +48,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) (type: boolean)
+              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -259,7 +259,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -275,7 +275,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) (type: boolean)
+              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
index 4abec73..ba5d187 100644
--- a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out
@@ -38,7 +38,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((sqrt(key) <> 13.0) and (key > '10')) and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+              predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -53,7 +53,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) and (sqrt(key) <> 13.0)) (type: boolean)
+              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -69,7 +69,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) and (sqrt(key) <> 13.0)) (type: boolean)
+              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -402,7 +402,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((sqrt(key) <> 13.0) and (key > '10')) and (key < '20')) and (key > '15')) and (key < '25')) (type: boolean)
+              predicate: ((sqrt(key) <> 13.0) and (key > '10') and (key < '20') and (key > '15') and (key < '25')) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
@@ -417,7 +417,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key > '10') and (key < '20')) and (key > '15')) and (key < '25')) and (sqrt(key) <> 13.0)) (type: boolean)
+              predicate: ((key > '10') and (key < '20') and (key > '15') and (key < '25') and (sqrt(key) <> 13.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -433,7 +433,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((key > '15') and (key < '25')) and (key > '10')) and (key < '20')) and (sqrt(key) <> 13.0)) (type: boolean)
+              predicate: ((key > '15') and (key < '25') and (key > '10') and (key < '20') and (sqrt(key) <> 13.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_udf_case.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_udf_case.q.out b/ql/src/test/results/clientpositive/ppd_udf_case.q.out
index bfe61c2..1c1c2a4 100644
--- a/ql/src/test/results/clientpositive/ppd_udf_case.q.out
+++ b/ql/src/test/results/clientpositive/ppd_udf_case.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((ds = '2008-04-08') and CASE WHEN ((key = '27')) THEN (true) WHEN ((key = '38')) THEN (false) ELSE (null) END) and key is not null) (type: boolean)
+              predicate: ((ds = '2008-04-08') and CASE WHEN ((key = '27')) THEN (true) WHEN ((key = '38')) THEN (false) ELSE (null) END and key is not null) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), hr (type: string)
@@ -53,7 +53,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((ds = '2008-04-08') and CASE WHEN ((key = '27')) THEN (true) WHEN ((key = '38')) THEN (false) ELSE (null) END) and key is not null) (type: boolean)
+              predicate: ((ds = '2008-04-08') and CASE WHEN ((key = '27')) THEN (true) WHEN ((key = '38')) THEN (false) ELSE (null) END and key is not null) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), hr (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_union.q.out b/ql/src/test/results/clientpositive/ppd_union.q.out
index 87b57c4..6f231b8 100644
--- a/ql/src/test/results/clientpositive/ppd_union.q.out
+++ b/ql/src/test/results/clientpositive/ppd_union.q.out
@@ -28,7 +28,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key < '100') and (key > '4')) and (value > 'val_4')) (type: boolean)
+              predicate: ((key < '100') and (key > '4') and (value > 'val_4')) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -47,7 +47,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > '150') and (key > '4')) and (value > 'val_4')) (type: boolean)
+              predicate: ((key > '150') and (key > '4') and (value > 'val_4')) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -290,7 +290,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key < '100') and (key > '4')) and (value > 'val_4')) (type: boolean)
+              predicate: ((key < '100') and (key > '4') and (value > 'val_4')) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -309,7 +309,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > '150') and (key > '4')) and (value > 'val_4')) (type: boolean)
+              predicate: ((key > '150') and (key > '4') and (value > 'val_4')) (type: boolean)
               Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/ppd_vc.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_vc.q.out b/ql/src/test/results/clientpositive/ppd_vc.q.out
index 4575172..cc25e80 100644
--- a/ql/src/test/results/clientpositive/ppd_vc.q.out
+++ b/ql/src/test/results/clientpositive/ppd_vc.q.out
@@ -438,7 +438,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: (((BLOCK__OFFSET__INSIDE__FILE < 100) and (BLOCK__OFFSET__INSIDE__FILE < 50)) and key is not null) (type: boolean)
+              predicate: ((BLOCK__OFFSET__INSIDE__FILE < 100) and (BLOCK__OFFSET__INSIDE__FILE < 50) and key is not null) (type: boolean)
               Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string), ds (type: string), hr (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/rcfile_null_value.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rcfile_null_value.q.out b/ql/src/test/results/clientpositive/rcfile_null_value.q.out
index c90287c..1a361e1 100644
--- a/ql/src/test/results/clientpositive/rcfile_null_value.q.out
+++ b/ql/src/test/results/clientpositive/rcfile_null_value.q.out
@@ -100,7 +100,7 @@ STAGE PLANS:
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/router_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/router_join_ppr.q.out b/ql/src/test/results/clientpositive/router_join_ppr.q.out
index 8a10129..f149058 100644
--- a/ql/src/test/results/clientpositive/router_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/router_join_ppr.q.out
@@ -1353,7 +1353,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -1374,7 +1374,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/sample8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample8.q.out b/ql/src/test/results/clientpositive/sample8.q.out
index a5ae1ef..3f50ed2 100644
--- a/ql/src/test/results/clientpositive/sample8.q.out
+++ b/ql/src/test/results/clientpositive/sample8.q.out
@@ -98,7 +98,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: true
-              predicate: (((((hash(key) & 2147483647) % 10) = 0) and value is not null) and (((hash(key) & 2147483647) % 1) = 0)) (type: boolean)
+              predicate: ((((hash(key) & 2147483647) % 10) = 0) and value is not null and (((hash(key) & 2147483647) % 1) = 0)) (type: boolean)
               Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
@@ -114,7 +114,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: true
-              predicate: (((((hash(key) & 2147483647) % 1) = 0) and value is not null) and (((hash(key) & 2147483647) % 10) = 0)) (type: boolean)
+              predicate: ((((hash(key) & 2147483647) % 1) = 0) and value is not null and (((hash(key) & 2147483647) % 10) = 0)) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/semijoin.q.out b/ql/src/test/results/clientpositive/semijoin.q.out
index 6005f72..25f62a2 100644
--- a/ql/src/test/results/clientpositive/semijoin.q.out
+++ b/ql/src/test/results/clientpositive/semijoin.q.out
@@ -768,7 +768,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 11 Data size: 84 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 5) and (value <= 'val_20')) and key is not null) (type: boolean)
+              predicate: ((key > 5) and (value <= 'val_20')) (type: boolean)
               Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), value (type: string)
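
Note that this plan does more than flatten: the trailing "key is not null" conjunct disappears altogether, which is safe because (key > 5) can only evaluate to true for non-null keys, so the extra null check added no filtering.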

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/semijoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/semijoin2.q.out b/ql/src/test/results/clientpositive/semijoin2.q.out
index 62e1961..757341a 100644
--- a/ql/src/test/results/clientpositive/semijoin2.q.out
+++ b/ql/src/test/results/clientpositive/semijoin2.q.out
@@ -62,7 +62,7 @@ STAGE PLANS:
             alias: t1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((bigint_col_22 is not null and decimal1709_col_26 is not null) and tinyint_col_8 is not null) and timestamp_col_10 is not null) (type: boolean)
+              predicate: (bigint_col_22 is not null and decimal1709_col_26 is not null and tinyint_col_8 is not null and timestamp_col_10 is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Reduce Output Operator
                 key expressions: bigint_col_22 (type: bigint), decimal1709_col_26 (type: decimal(38,23)), tinyint_col_8 (type: tinyint)
@@ -74,7 +74,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: (((UDFToLong(tinyint_col_6) is not null and decimal0504_col_37 is not null) and tinyint_col_33 is not null) and UDFToInteger(smallint_col_38) is not null) (type: boolean)
+              predicate: (UDFToLong(tinyint_col_6) is not null and decimal0504_col_37 is not null and tinyint_col_33 is not null and UDFToInteger(smallint_col_38) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Reduce Output Operator
                 key expressions: UDFToLong(tinyint_col_6) (type: bigint), decimal0504_col_37 (type: decimal(38,23)), tinyint_col_33 (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/semijoin4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/semijoin4.q.out b/ql/src/test/results/clientpositive/semijoin4.q.out
index 2aaf7ea..015dad1 100644
--- a/ql/src/test/results/clientpositive/semijoin4.q.out
+++ b/ql/src/test/results/clientpositive/semijoin4.q.out
@@ -69,7 +69,7 @@ STAGE PLANS:
             alias: t1
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToInteger(tinyint_col_46) = -92) and decimal1309_col_65 is not null) and bigint_col_13 is not null) and UDFToInteger(tinyint_col_46) is not null) (type: boolean)
+              predicate: ((UDFToInteger(tinyint_col_46) = -92) and decimal1309_col_65 is not null and bigint_col_13 is not null and UDFToInteger(tinyint_col_46) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: bigint_col_13 (type: bigint), smallint_col_24 (type: smallint), double_col_60 (type: double), decimal1309_col_65 (type: decimal(13,9))
@@ -85,7 +85,7 @@ STAGE PLANS:
             alias: t2
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToInteger(tinyint_col_21) = -92) and tinyint_col_18 is not null) and decimal2709_col_9 is not null) and UDFToInteger(tinyint_col_21) is not null) (type: boolean)
+              predicate: ((UDFToInteger(tinyint_col_21) = -92) and tinyint_col_18 is not null and decimal2709_col_9 is not null and UDFToInteger(tinyint_col_21) is not null) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: decimal2709_col_9 (type: decimal(27,9)), tinyint_col_18 (type: tinyint)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out
index fd77635..a994bf7 100644
--- a/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out
@@ -106,7 +106,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (key = '2')) (type: boolean)
+              predicate: (key is not null and val is not null and (key = '2')) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)
@@ -136,7 +136,7 @@ STAGE PLANS:
             alias: a
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key is not null and val is not null) and (not (key = '2'))) (type: boolean)
+              predicate: (key is not null and val is not null and (not (key = '2'))) (type: boolean)
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), val (type: string)


[58/58] [abbrv] hive git commit: Merge branch 'master' into llap

Posted by jd...@apache.org.
Merge branch 'master' into llap

Conflicts:
	llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
	llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/99cb7f96
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/99cb7f96
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/99cb7f96

Branch: refs/heads/llap
Commit: 99cb7f96f1a516698b587320e841d26cd7bb034d
Parents: 0afaa8f 2d28291
Author: Jason Dere <jd...@hortonworks.com>
Authored: Tue Apr 19 15:24:19 2016 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Tue Apr 19 15:24:19 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hive/beeline/BeeLine.java   |   22 +-
 .../hive/beeline/BeeLineCommandCompleter.java   |   26 +-
 .../beeline/SeparatedValuesOutputFormat.java    |    2 +-
 bin/ext/cleardanglingscratchdir.cmd             |   35 +
 bin/ext/cleardanglingscratchdir.sh              |   28 +
 bin/ext/llapstatus.sh                           |   42 +
 .../apache/hadoop/hive/common/FileUtils.java    |    2 +-
 .../hadoop/hive/common/type/HiveBaseChar.java   |    6 +
 .../hadoop/hive/common/type/HiveVarchar.java    |    4 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   11 +-
 .../org/apache/hadoop/hive/conf/Validator.java  |    2 +-
 .../apache/hadoop/hive/ql/log/PerfLogger.java   |   51 +-
 .../apache/hive/common/util/StreamPrinter.java  |   18 +-
 .../java/org/apache/hive/http/HttpServer.java   |   29 +-
 .../hive/common/type/TestHiveBaseChar.java      |    2 +
 .../hadoop/hive/hbase/HBaseSerDeParameters.java |    6 +-
 .../hive/hbase/HiveHBaseInputFormatUtil.java    |    6 +-
 .../test/results/positive/hbase_pushdown.q.out  |    2 +-
 .../test/results/positive/ppd_key_ranges.q.out  |    2 +-
 .../mapreduce/FileOutputCommitterContainer.java |    4 +-
 .../mapreduce/FosterStorageHandler.java         |    6 +-
 .../streaming/StreamingIntegrationTester.java   |   12 +-
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |   15 +-
 .../hive/metastore/TestMetaStoreMetrics.java    |   13 +-
 .../TestHiveAuthorizerCheckInvocation.java      |    2 +-
 .../plugin/TestHiveAuthorizerShowFilters.java   |    4 +-
 .../ql/session/TestClearDanglingScratchDir.java |  131 +
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   |   78 +
 .../apache/hive/jdbc/TestJdbcWithMiniHA.java    |  200 ++
 .../jdbc/authorization/TestHS2AuthzContext.java |   14 +-
 .../authorization/TestJdbcMetadataApiAuth.java  |    4 +-
 .../hive/jdbc/miniHS2/StartMiniHS2Cluster.java  |    2 +-
 .../hive/jdbc/miniHS2/TestHs2Metrics.java       |   41 +-
 .../service/cli/session/TestQueryDisplay.java   |    2 +
 .../cli/thrift/TestThriftBinaryCLIService.java  |   92 -
 .../cli/thrift/TestThriftHttpCLIService.java    |  241 --
 .../thrift/ThriftCliServiceMessageSizeTest.java |  140 +
 itests/qtest/pom.xml                            |    2 +-
 .../hive/ql/security/DummyAuthenticator.java    |    5 +
 .../security/InjectableDummyAuthenticator.java  |    5 +
 ...SQLStdHiveAuthorizationValidatorForTest.java |   47 +-
 .../org/apache/hive/jdbc/HiveBaseResultSet.java |   10 +-
 .../org/apache/hive/jdbc/HiveConnection.java    |   84 +-
 .../org/apache/hive/jdbc/HiveStatement.java     |   18 +
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   |   20 +-
 .../hive/llap/registry/ServiceInstance.java     |    6 +
 .../hive/llap/registry/ServiceRegistry.java     |    4 +-
 .../registry/impl/LlapFixedRegistryImpl.java    |   29 +-
 .../llap/registry/impl/LlapRegistryService.java |   17 +-
 .../impl/LlapZookeeperRegistryImpl.java         |   29 +-
 llap-server/pom.xml                             |   55 +
 llap-server/sql/serviceCheckScript.sql          |   12 +
 .../hive/llap/cli/LlapOptionsProcessor.java     |   23 +-
 .../hadoop/hive/llap/cli/LlapServiceDriver.java |    8 +
 .../llap/cli/LlapStatusOptionsProcessor.java    |  139 +
 .../hive/llap/cli/LlapStatusServiceDriver.java  |  821 ++++++
 .../hive/llap/daemon/impl/LlapDaemon.java       |   17 +-
 .../daemon/services/impl/LlapWebServices.java   |  176 +-
 .../llap/io/decode/OrcEncodedDataConsumer.java  |   45 +-
 .../llap/metrics/LlapDaemonCacheMetrics.java    |    4 +-
 .../llap/metrics/LlapDaemonExecutorMetrics.java |    4 +-
 .../llap/metrics/LlapDaemonQueueMetrics.java    |    4 +-
 .../hadoop/hive/llap/metrics/MetricsUtils.java  |    1 -
 .../hive/llap/security/SecretManager.java       |   18 +-
 .../llap/shufflehandler/ShuffleHandler.java     |    2 +-
 .../hadoop-metrics2.properties.template         |   50 +
 .../main/resources/llap-cli-log4j2.properties   |    9 +-
 .../hive/llap/daemon/MiniLlapCluster.java       |    5 +-
 .../hive/metastore/MetaStoreSchemaInfo.java     |    4 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |   28 +-
 .../hive/metastore/hbase/HBaseImport.java       |    4 +-
 .../hive/metastore/hbase/HBaseReadWrite.java    |   10 +-
 orc/src/java/org/apache/orc/OrcUtils.java       |   75 +
 orc/src/java/org/apache/orc/Reader.java         |    6 +
 orc/src/java/org/apache/orc/RecordReader.java   |    8 +-
 .../java/org/apache/orc/TypeDescription.java    |   62 +-
 .../org/apache/orc/impl/BitFieldReader.java     |    5 +-
 .../java/org/apache/orc/impl/IntegerReader.java |   26 +-
 .../apache/orc/impl/RunLengthByteReader.java    |   36 +-
 .../apache/orc/impl/RunLengthIntegerReader.java |   31 +-
 .../orc/impl/RunLengthIntegerReaderV2.java      |   33 +-
 .../java/org/apache/orc/impl/WriterImpl.java    |   47 +-
 packaging/src/main/assembly/bin.xml             |    9 +
 pom.xml                                         |    1 +
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   22 +-
 .../hive/ql/exec/AbstractFileMergeOperator.java |    9 +
 .../hive/ql/exec/AppMasterEventOperator.java    |    4 +-
 .../hadoop/hive/ql/exec/CollectOperator.java    |   11 +-
 .../hadoop/hive/ql/exec/CommonJoinOperator.java |    8 +-
 .../hadoop/hive/ql/exec/DemuxOperator.java      |    8 +-
 .../hadoop/hive/ql/exec/DummyStoreOperator.java |   11 +-
 .../hadoop/hive/ql/exec/FileSinkOperator.java   |    4 +-
 .../hadoop/hive/ql/exec/FilterOperator.java     |    4 +-
 .../hadoop/hive/ql/exec/ForwardOperator.java    |    4 +-
 .../hadoop/hive/ql/exec/GroupByOperator.java    |    4 +-
 .../hive/ql/exec/HashTableDummyOperator.java    |    4 +-
 .../ql/exec/LateralViewForwardOperator.java     |    5 +-
 .../hive/ql/exec/LateralViewJoinOperator.java   |    4 +-
 .../hadoop/hive/ql/exec/LimitOperator.java      |    4 +-
 .../hadoop/hive/ql/exec/ListSinkOperator.java   |   11 +-
 .../apache/hadoop/hive/ql/exec/MapOperator.java |    9 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |    1 +
 .../apache/hadoop/hive/ql/exec/MuxOperator.java |    8 +-
 .../apache/hadoop/hive/ql/exec/Operator.java    |   22 +-
 .../apache/hadoop/hive/ql/exec/PTFOperator.java |    4 +-
 .../hadoop/hive/ql/exec/ScriptOperator.java     |   40 +-
 .../hadoop/hive/ql/exec/SelectOperator.java     |    4 +-
 .../ql/exec/SparkHashTableSinkOperator.java     |   10 +-
 .../hadoop/hive/ql/exec/TableScanOperator.java  |    2 +-
 .../apache/hadoop/hive/ql/exec/TaskRunner.java  |    3 +-
 .../hadoop/hive/ql/exec/TerminalOperator.java   |   10 +
 .../hadoop/hive/ql/exec/UDTFOperator.java       |    8 +-
 .../hadoop/hive/ql/exec/UnionOperator.java      |    2 +-
 .../hadoop/hive/ql/exec/mr/MapRedTask.java      |    5 +-
 .../hadoop/hive/ql/exec/mr/MapredLocalTask.java |    9 +-
 .../vector/VectorAppMasterEventOperator.java    |   11 +-
 .../ql/exec/vector/VectorFileSinkOperator.java  |    8 +-
 .../ql/exec/vector/VectorFilterOperator.java    |    8 +-
 .../ql/exec/vector/VectorGroupByOperator.java   |   25 +-
 .../ql/exec/vector/VectorLimitOperator.java     |    5 +-
 .../ql/exec/vector/VectorMapJoinOperator.java   |   11 +-
 .../VectorMapJoinOuterFilteredOperator.java     |    8 +-
 .../hive/ql/exec/vector/VectorMapOperator.java  |    5 +-
 .../exec/vector/VectorReduceSinkOperator.java   |    8 +-
 .../exec/vector/VectorSMBMapJoinOperator.java   |   13 +-
 .../ql/exec/vector/VectorSelectOperator.java    |   21 +-
 .../VectorSparkHashTableSinkOperator.java       |    6 +-
 ...VectorSparkPartitionPruningSinkOperator.java |    8 +-
 .../ql/exec/vector/VectorizationContext.java    |   11 +-
 .../ql/exec/vector/VectorizedRowBatchCtx.java   |   13 +-
 .../hadoop/hive/ql/hooks/HookContext.java       |   11 +-
 .../hive/ql/index/IndexPredicateAnalyzer.java   |   35 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |   43 +-
 .../hive/ql/io/orc/OrcRawRecordMerger.java      |    7 +-
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |   12 +-
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |   50 +-
 .../hadoop/hive/ql/io/orc/SchemaEvolution.java  |  234 +-
 .../hive/ql/io/orc/TreeReaderFactory.java       |  858 +++---
 .../ql/io/orc/VectorizedOrcInputFormat.java     |   32 +-
 .../hadoop/hive/ql/io/orc/WriterImpl.java       |    2 -
 .../ql/io/parquet/convert/ETypeConverter.java   |    3 +-
 .../io/parquet/convert/HiveStructConverter.java |    2 +-
 .../write/ParquetRecordWriterWrapper.java       |    4 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |    2 +-
 .../optimizer/ConstantPropagateProcFactory.java |   22 +-
 .../hadoop/hive/ql/optimizer/IndexUtils.java    |    2 +
 .../SizeBasedBigTableSelectorForAutoSMJ.java    |    2 +-
 .../hive/ql/optimizer/StatsOptimizer.java       |   37 +-
 .../calcite/cost/HiveAlgorithmsUtil.java        |   12 +-
 .../hive/ql/optimizer/physical/LlapDecider.java |   12 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |    2 +
 .../stats/annotation/StatsRulesProcFactory.java |  107 +-
 .../apache/hadoop/hive/ql/parse/ASTNode.java    |    2 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |    4 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |    4 +-
 .../hadoop/hive/ql/parse/MaskAndFilterInfo.java |   38 +
 .../apache/hadoop/hive/ql/parse/ParseUtils.java |    8 +-
 .../hadoop/hive/ql/parse/ReplicationSpec.java   |    3 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   53 +-
 .../apache/hadoop/hive/ql/parse/TableMask.java  |   85 +-
 .../hadoop/hive/ql/parse/TableSample.java       |    4 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |   84 +-
 .../hive/ql/parse/spark/GenSparkUtils.java      |    2 +-
 .../SparkPartitionPruningSinkOperator.java      |   16 +-
 .../hadoop/hive/ql/plan/ExprNodeDescUtils.java  |   14 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |    2 +-
 .../hadoop/hive/ql/processors/CommandUtil.java  |    6 +-
 .../ql/security/HadoopDefaultAuthenticator.java |    5 +
 .../ql/security/HiveAuthenticationProvider.java |    2 +
 .../SessionStateConfigUserAuthenticator.java    |    5 +
 .../security/SessionStateUserAuthenticator.java |    5 +
 .../AuthorizationMetaStoreFilterHook.java       |    4 +-
 .../plugin/HiveAuthorizationValidator.java      |   12 +-
 .../authorization/plugin/HiveAuthorizer.java    |   68 +-
 .../plugin/HiveAuthorizerImpl.java              |   20 +-
 .../authorization/plugin/HiveAuthzContext.java  |   83 -
 .../plugin/HivePrivilegeObject.java             |   31 +
 .../authorization/plugin/HiveV1Authorizer.java  |   20 +-
 .../authorization/plugin/QueryContext.java      |   78 +
 .../sqlstd/DummyHiveAuthorizationValidator.java |   21 +-
 .../SQLStdHiveAuthorizationValidator.java       |   22 +-
 .../ql/session/ClearDanglingScratchDir.java     |  176 ++
 .../hadoop/hive/ql/session/OperationLog.java    |   10 +-
 .../hadoop/hive/ql/session/SessionState.java    |   43 +-
 .../hive/ql/stats/fs/FSStatsAggregator.java     |    2 +-
 .../hive/ql/stats/fs/FSStatsPublisher.java      |    3 +-
 .../hive/ql/txn/compactor/CompactorMR.java      |   12 +-
 .../org/apache/hadoop/hive/ql/udf/UDFSign.java  |   15 +
 .../apache/hadoop/hive/ql/udf/UDFToByte.java    |    7 +-
 .../apache/hadoop/hive/ql/udf/UDFToDouble.java  |    6 +-
 .../apache/hadoop/hive/ql/udf/UDFToFloat.java   |    6 +-
 .../apache/hadoop/hive/ql/udf/UDFToInteger.java |    4 +
 .../apache/hadoop/hive/ql/udf/UDFToLong.java    |    4 +
 .../apache/hadoop/hive/ql/udf/UDFToShort.java   |    4 +
 .../ql/udf/generic/GenericUDAFComputeStats.java |    6 +
 .../hive/ql/udf/generic/GenericUDFBetween.java  |    2 +-
 .../hive/ql/exec/TestFunctionRegistry.java      |   26 +-
 .../hadoop/hive/ql/exec/TestOperatorNames.java  |   98 +
 .../vector/util/FakeCaptureOutputOperator.java  |   10 +-
 .../util/FakeVectorDataSourceOperator.java      |   11 +-
 .../hive/ql/io/orc/TestTypeDescription.java     |    4 +-
 .../hive/ql/io/orc/TestVectorOrcFile.java       | 1634 ++++++------
 .../hive/ql/io/orc/TestVectorizedORCReader.java |    7 +-
 .../hive/ql/metadata/TestTableIterable.java     |   67 +
 .../hive/ql/parse/TestSemanticAnalyzer.java     |   37 +
 .../ql/udf/generic/TestGenericUDFOPMinus.java   |    4 +-
 .../udf/generic/TestGenericUDFOPMultiply.java   |    4 +-
 .../ql/udf/generic/TestGenericUDFOPPlus.java    |    4 +-
 .../clientpositive/alter_partition_change_col.q |    1 +
 .../clientpositive/alter_table_cascade.q        |    1 +
 .../clientpositive/cbo_rp_udf_udaf_stats_opt.q  |   22 +
 .../queries/clientpositive/float_equality.q     |    3 +
 ql/src/test/queries/clientpositive/foldts.q     |   20 +
 .../test/queries/clientpositive/type_widening.q |    6 +
 .../queries/clientpositive/vector_between_in.q  |   30 +
 .../vector_orc_string_reader_empty_dict.q       |   20 +
 .../queries/clientpositive/windowing_gby2.q     |   41 +
 .../results/clientnegative/dyn_part_max.q.out   |    2 +-
 .../clientpositive/annotate_stats_filter.q.out  |    2 +-
 .../results/clientpositive/auto_join16.q.out    |    4 +-
 .../results/clientpositive/auto_join4.q.out     |    2 +-
 .../results/clientpositive/auto_join5.q.out     |    2 +-
 .../results/clientpositive/auto_join8.q.out     |    2 +-
 .../auto_join_reordering_values.q.out           |    2 +-
 .../clientpositive/auto_sortmerge_join_8.q.out  |    2 +
 .../test/results/clientpositive/cbo_const.q.out |    2 +-
 .../results/clientpositive/cbo_rp_join1.q.out   |    4 +-
 .../cbo_rp_udf_udaf_stats_opt.q.out             |  126 +
 .../clientpositive/constprog_semijoin.q.out     |   24 +-
 .../clientpositive/correlationoptimizer13.q.out |    2 +-
 .../clientpositive/correlationoptimizer9.q.out  |    4 +-
 .../clientpositive/dynamic_rdd_cache.q.out      |    8 +-
 .../dynpart_sort_optimization.q.out             |    2 +-
 .../clientpositive/explain_logical.q.out        |    6 +-
 .../clientpositive/filter_cond_pushdown.q.out   |    8 +-
 .../results/clientpositive/float_equality.q.out |   17 +
 ql/src/test/results/clientpositive/foldts.q.out |  154 ++
 .../groupby_multi_single_reducer3.q.out         |    8 +-
 .../clientpositive/groupby_position.q.out       |    4 +-
 .../identity_project_remove_skip.q.out          |    2 +-
 .../clientpositive/index_auto_mult_tables.q.out |   20 +-
 .../index_auto_mult_tables_compact.q.out        |   20 +-
 .../clientpositive/index_auto_self_join.q.out   |   20 +-
 .../results/clientpositive/index_bitmap3.q.out  |    4 +-
 .../clientpositive/index_bitmap_auto.q.out      |    4 +-
 .../index_bitmap_compression.q.out              |    4 +-
 .../clientpositive/infer_const_type.q.out       |   10 +-
 .../clientpositive/input_testxpath4.q.out       |    2 +-
 ql/src/test/results/clientpositive/join16.q.out |    4 +-
 ql/src/test/results/clientpositive/join19.q.out |    8 +-
 ql/src/test/results/clientpositive/join4.q.out  |    2 +-
 ql/src/test/results/clientpositive/join42.q.out |    2 +-
 ql/src/test/results/clientpositive/join5.q.out  |    2 +-
 ql/src/test/results/clientpositive/join8.q.out  |    2 +-
 .../clientpositive/join_grp_diff_keys.q.out     |    2 +-
 .../results/clientpositive/join_reorder2.q.out  |    2 +-
 .../results/clientpositive/join_reorder3.q.out  |    2 +-
 .../llap/dynamic_partition_pruning.q.out        |   20 +-
 .../llap/dynamic_partition_pruning_2.q.out      |   30 +-
 .../llap/hybridgrace_hashjoin_2.q.out           |   12 +-
 .../clientpositive/llap/tez_join_hash.q.out     |    4 +
 .../llap/tez_union_group_by.q.out               |    2 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   20 +-
 .../clientpositive/louter_join_ppr.q.out        |    4 +-
 .../test/results/clientpositive/masking_1.q.out |   12 +-
 .../test/results/clientpositive/masking_2.q.out |   10 +-
 .../test/results/clientpositive/masking_3.q.out |    2 +-
 .../test/results/clientpositive/masking_4.q.out |    4 +-
 .../test/results/clientpositive/masking_5.q.out |    2 +-
 .../clientpositive/masking_disablecbo_1.q.out   |   14 +-
 .../clientpositive/masking_disablecbo_2.q.out   |   10 +-
 .../clientpositive/masking_disablecbo_3.q.out   |    2 +-
 .../clientpositive/masking_disablecbo_4.q.out   |    4 +-
 .../results/clientpositive/multiMapJoin1.q.out  |   16 +-
 .../clientpositive/orc_predicate_pushdown.q.out |   24 +-
 .../parquet_predicate_pushdown.q.out            |   28 +-
 ql/src/test/results/clientpositive/pcs.q.out    |    6 +-
 .../results/clientpositive/perf/query13.q.out   |    6 +-
 .../results/clientpositive/perf/query15.q.out   |    2 +-
 .../results/clientpositive/perf/query17.q.out   |   14 +-
 .../results/clientpositive/perf/query18.q.out   |    6 +-
 .../results/clientpositive/perf/query19.q.out   |    4 +-
 .../results/clientpositive/perf/query21.q.out   |    2 +-
 .../results/clientpositive/perf/query22.q.out   |    2 +-
 .../results/clientpositive/perf/query25.q.out   |   12 +-
 .../results/clientpositive/perf/query26.q.out   |    4 +-
 .../results/clientpositive/perf/query27.q.out   |    4 +-
 .../results/clientpositive/perf/query29.q.out   |   18 +-
 .../results/clientpositive/perf/query31.q.out   |   12 +-
 .../results/clientpositive/perf/query32.q.out   |    2 +-
 .../results/clientpositive/perf/query34.q.out   |    6 +-
 .../results/clientpositive/perf/query39.q.out   |    8 +-
 .../results/clientpositive/perf/query40.q.out   |    2 +-
 .../results/clientpositive/perf/query42.q.out   |    2 +-
 .../results/clientpositive/perf/query45.q.out   |    4 +-
 .../results/clientpositive/perf/query46.q.out   |   14 +-
 .../results/clientpositive/perf/query48.q.out   |    6 +-
 .../results/clientpositive/perf/query50.q.out   |    6 +-
 .../results/clientpositive/perf/query52.q.out   |    2 +-
 .../results/clientpositive/perf/query54.q.out   |   10 +-
 .../results/clientpositive/perf/query55.q.out   |    2 +-
 .../results/clientpositive/perf/query64.q.out   |   16 +-
 .../results/clientpositive/perf/query65.q.out   |    4 +-
 .../results/clientpositive/perf/query66.q.out   |    4 +-
 .../results/clientpositive/perf/query67.q.out   |    2 +-
 .../results/clientpositive/perf/query68.q.out   |    4 +-
 .../results/clientpositive/perf/query7.q.out    |    4 +-
 .../results/clientpositive/perf/query71.q.out   |   12 +-
 .../results/clientpositive/perf/query72.q.out   |    6 +-
 .../results/clientpositive/perf/query73.q.out   |    6 +-
 .../results/clientpositive/perf/query75.q.out   |   14 +-
 .../results/clientpositive/perf/query76.q.out   |    6 +-
 .../results/clientpositive/perf/query79.q.out   |    4 +-
 .../results/clientpositive/perf/query80.q.out   |    6 +-
 .../results/clientpositive/perf/query82.q.out   |    4 +-
 .../results/clientpositive/perf/query84.q.out   |    4 +-
 .../results/clientpositive/perf/query85.q.out   |   10 +-
 .../results/clientpositive/perf/query88.q.out   |   32 +-
 .../results/clientpositive/perf/query89.q.out   |   16 +-
 .../results/clientpositive/perf/query90.q.out   |    4 +-
 .../results/clientpositive/perf/query91.q.out   |    8 +-
 .../results/clientpositive/perf/query92.q.out   |    4 +-
 .../results/clientpositive/perf/query93.q.out   |    2 +-
 .../results/clientpositive/perf/query94.q.out   |    2 +-
 .../results/clientpositive/perf/query95.q.out   |    2 +-
 .../results/clientpositive/perf/query96.q.out   |    4 +-
 .../results/clientpositive/pointlookup.q.out    |   12 +-
 .../results/clientpositive/pointlookup2.q.out   |   16 +-
 .../results/clientpositive/pointlookup3.q.out   |    8 +-
 .../results/clientpositive/ppd_gby_join.q.out   |    8 +-
 .../test/results/clientpositive/ppd_join.q.out  |    8 +-
 .../test/results/clientpositive/ppd_join2.q.out |   12 +-
 .../test/results/clientpositive/ppd_join3.q.out |   12 +-
 .../test/results/clientpositive/ppd_join4.q.out |    2 +-
 .../clientpositive/ppd_outer_join2.q.out        |    8 +-
 .../clientpositive/ppd_outer_join3.q.out        |    8 +-
 .../clientpositive/ppd_outer_join4.q.out        |   12 +-
 .../results/clientpositive/ppd_udf_case.q.out   |    4 +-
 .../test/results/clientpositive/ppd_union.q.out |    8 +-
 ql/src/test/results/clientpositive/ppd_vc.q.out |    2 +-
 .../clientpositive/rcfile_null_value.q.out      |    2 +-
 .../clientpositive/router_join_ppr.q.out        |    4 +-
 .../test/results/clientpositive/sample8.q.out   |    4 +-
 .../test/results/clientpositive/semijoin.q.out  |    2 +-
 .../test/results/clientpositive/semijoin2.q.out |    4 +-
 .../test/results/clientpositive/semijoin4.q.out |    4 +-
 .../clientpositive/skewjoin_mapjoin9.q.out      |    4 +-
 .../results/clientpositive/skewjoinopt12.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt14.q.out  |    4 +-
 .../results/clientpositive/skewjoinopt16.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt17.q.out  |    8 +-
 .../results/clientpositive/skewjoinopt2.q.out   |   16 +-
 .../results/clientpositive/smb_mapjoin_10.q.out |    2 +-
 .../results/clientpositive/smb_mapjoin_14.q.out |    2 +-
 .../clientpositive/sort_merge_join_desc_2.q.out |    2 +-
 .../clientpositive/sort_merge_join_desc_3.q.out |    2 +-
 .../clientpositive/sort_merge_join_desc_4.q.out |    4 +-
 .../clientpositive/sort_merge_join_desc_8.q.out |    4 +-
 .../clientpositive/spark/auto_join16.q.out      |    4 +-
 .../clientpositive/spark/auto_join4.q.out       |    2 +-
 .../clientpositive/spark/auto_join5.q.out       |    2 +-
 .../clientpositive/spark/auto_join8.q.out       |    2 +-
 .../spark/auto_join_reordering_values.q.out     |    2 +-
 .../spark/constprog_semijoin.q.out              |   24 +-
 .../spark/dynamic_rdd_cache.q.out               |    8 +-
 .../spark/groupby_multi_single_reducer3.q.out   |    8 +-
 .../clientpositive/spark/groupby_position.q.out |    4 +-
 .../spark/identity_project_remove_skip.q.out    |    2 +-
 .../spark/index_auto_self_join.q.out            |   12 +-
 .../clientpositive/spark/index_bitmap3.q.out    |    4 +-
 .../spark/index_bitmap_auto.q.out               |    4 +-
 .../results/clientpositive/spark/join16.q.out   |    4 +-
 .../results/clientpositive/spark/join19.q.out   |    8 +-
 .../results/clientpositive/spark/join4.q.out    |    2 +-
 .../results/clientpositive/spark/join5.q.out    |    2 +-
 .../results/clientpositive/spark/join8.q.out    |    2 +-
 .../clientpositive/spark/join_reorder2.q.out    |    2 +-
 .../clientpositive/spark/join_reorder3.q.out    |    2 +-
 .../clientpositive/spark/louter_join_ppr.q.out  |    4 +-
 .../clientpositive/spark/ppd_gby_join.q.out     |    8 +-
 .../results/clientpositive/spark/ppd_join.q.out |    8 +-
 .../clientpositive/spark/ppd_join2.q.out        |   12 +-
 .../clientpositive/spark/ppd_join3.q.out        |   12 +-
 .../clientpositive/spark/ppd_outer_join2.q.out  |    8 +-
 .../clientpositive/spark/ppd_outer_join3.q.out  |    8 +-
 .../clientpositive/spark/ppd_outer_join4.q.out  |   12 +-
 .../clientpositive/spark/router_join_ppr.q.out  |    4 +-
 .../results/clientpositive/spark/sample8.q.out  |    4 +-
 .../results/clientpositive/spark/semijoin.q.out |    2 +-
 .../clientpositive/spark/skewjoinopt12.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt14.q.out    |    4 +-
 .../clientpositive/spark/skewjoinopt16.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt17.q.out    |    8 +-
 .../clientpositive/spark/skewjoinopt2.q.out     |   16 +-
 .../clientpositive/spark/smb_mapjoin_10.q.out   |    2 +-
 .../clientpositive/spark/smb_mapjoin_14.q.out   |    2 +-
 .../spark/sort_merge_join_desc_2.q.out          |   22 +-
 .../spark/sort_merge_join_desc_3.q.out          |   22 +-
 .../spark/sort_merge_join_desc_4.q.out          |   26 +-
 .../spark/sort_merge_join_desc_8.q.out          |   42 +-
 .../spark/spark_dynamic_partition_pruning.q.out | 2313 +++++++++-------
 ...k_vectorized_dynamic_partition_pruning.q.out | 2515 +++++++++++-------
 .../clientpositive/spark/subquery_in.q.out      |    2 +-
 .../spark/vector_between_in.q.out               |  332 +++
 .../spark/vector_mapjoin_reduce.q.out           |    6 +-
 .../clientpositive/spark/vectorization_0.q.out  |    2 +-
 .../clientpositive/spark/vectorization_14.q.out |    2 +-
 .../clientpositive/spark/vectorization_17.q.out |    2 +-
 .../spark/vectorization_short_regress.q.out     |    2 +-
 .../spark/vectorized_string_funcs.q.out         |    2 +-
 .../results/clientpositive/subquery_in.q.out    |    2 +-
 .../clientpositive/tez/bucketpruning1.q.out     |   32 +-
 .../clientpositive/tez/constprog_semijoin.q.out |   24 +-
 .../tez/dynamic_partition_pruning.q.out         |   20 +-
 .../tez/dynamic_partition_pruning_2.q.out       |   30 +-
 .../tez/dynpart_sort_optimization.q.out         |    2 +-
 .../clientpositive/tez/explainuser_1.q.out      |   38 +-
 .../clientpositive/tez/explainuser_2.q.out      |   12 +-
 .../clientpositive/tez/explainuser_3.q.out      |    8 +-
 .../tez/hybridgrace_hashjoin_2.q.out            |   12 +-
 .../clientpositive/tez/subquery_in.q.out        |    2 +-
 .../clientpositive/tez/tez_join_hash.q.out      |    4 +
 .../clientpositive/tez/tez_union_group_by.q.out |    2 +-
 .../tez/vector_aggregate_without_gby.q.out      |    6 +-
 .../tez/vector_auto_smb_mapjoin_14.q.out        |   38 +-
 .../clientpositive/tez/vector_between_in.q.out  |  336 +++
 .../clientpositive/tez/vector_date_1.q.out      |    4 +-
 .../tez/vector_decimal_cast.q.out               |    2 +-
 .../tez/vector_decimal_expressions.q.out        |    2 +-
 .../tez/vector_groupby_mapjoin.q.out            |    4 +-
 .../clientpositive/tez/vector_interval_2.q.out  |   12 +-
 .../tez/vector_join_part_col_char.q.out         |   10 +-
 .../tez/vector_leftsemi_mapjoin.q.out           |   12 +-
 .../tez/vector_mapjoin_reduce.q.out             |    6 +-
 .../clientpositive/tez/vectorization_0.q.out    |    2 +-
 .../clientpositive/tez/vectorization_14.q.out   |    2 +-
 .../clientpositive/tez/vectorization_17.q.out   |    2 +-
 .../clientpositive/tez/vectorization_7.q.out    |    4 +-
 .../tez/vectorization_short_regress.q.out       |    2 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   20 +-
 .../tez/vectorized_parquet_types.q.out          |    2 +-
 .../tez/vectorized_string_funcs.q.out           |    2 +-
 .../tez/vectorized_timestamp.q.out              |    8 +-
 .../results/clientpositive/type_widening.q.out  |  112 +
 .../results/clientpositive/udf_greatest.q.out   |    4 +-
 .../test/results/clientpositive/udf_least.q.out |    4 +-
 .../clientpositive/udf_to_unix_timestamp.q.out  |    2 +-
 .../clientpositive/vector_between_in.q.out      |  304 +++
 .../results/clientpositive/vector_date_1.q.out  |    4 +-
 .../clientpositive/vector_decimal_cast.q.out    |    2 +-
 .../vector_decimal_expressions.q.out            |    2 +-
 .../clientpositive/vector_interval_2.q.out      |   12 +-
 .../vector_leftsemi_mapjoin.q.out               |   12 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |    6 +-
 .../vector_orc_string_reader_empty_dict.q.out   |   62 +
 .../clientpositive/vectorization_0.q.out        |    2 +-
 .../clientpositive/vectorization_14.q.out       |    2 +-
 .../clientpositive/vectorization_17.q.out       |    2 +-
 .../clientpositive/vectorization_7.q.out        |    4 +-
 .../vectorization_short_regress.q.out           |    2 +-
 .../vectorized_string_funcs.q.out               |    2 +-
 .../results/clientpositive/windowing_gby2.q.out |  652 +++++
 .../serde2/MetadataTypedColumnsetSerDe.java     |    2 +-
 .../serde2/dynamic_type/thrift_grammar.java     |    2 +-
 .../hive/serde2/lazy/LazySerDeParameters.java   |    3 +-
 .../hadoop/hive/serde2/lazy/LazyUtils.java      |    2 +-
 .../primitive/JavaHiveCharObjectInspector.java  |   15 +-
 .../JavaHiveVarcharObjectInspector.java         |   15 +-
 .../PrimitiveObjectInspectorConverter.java      |    8 +-
 .../serde2/thrift/TCTLSeparatedProtocol.java    |   25 +-
 .../hive/serde2/typeinfo/TypeInfoUtils.java     |   17 +-
 .../TestObjectInspectorConverters.java          |   95 +
 .../TestStandardObjectInspectors.java           |   14 +-
 .../org/apache/hive/tmpl/QueryProfileTmpl.jamon |   16 +-
 .../hive/service/cli/HiveSQLException.java      |    6 +-
 .../cli/operation/MetadataOperation.java        |    6 +-
 .../hive/service/cli/operation/Operation.java   |    2 +-
 .../service/cli/operation/SQLOperation.java     |    5 +
 .../cli/operation/SQLOperationDisplay.java      |    9 +
 .../service/cli/session/HiveSessionImpl.java    |    2 +
 .../service/cli/session/SessionManager.java     |   21 +-
 .../cli/thrift/ThriftBinaryCLIService.java      |   64 +-
 .../service/cli/thrift/ThriftCLIService.java    |   61 +-
 .../service/cli/thrift/ThriftHttpServlet.java   |   17 +
 .../apache/hive/service/server/HiveServer2.java |    4 +-
 .../hive-webapps/hiveserver2/hiveserver2.jsp    |   22 +-
 .../cli/TestRetryingThriftCLIServiceClient.java |  130 +-
 .../cli/thrift/ThriftCLIServiceTest.java        |    4 +-
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |   34 +-
 .../apache/hadoop/hive/shims/HadoopShims.java   |    6 +
 .../hive/ql/exec/vector/BytesColumnVector.java  |   11 +
 .../ql/exec/vector/TimestampColumnVector.java   |    2 +-
 .../hive/ql/exec/vector/UnionColumnVector.java  |    2 -
 .../junit/runners/ConcurrentTestRunner.java     |    4 +-
 494 files changed, 11925 insertions(+), 5644 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
----------------------------------------------------------------------
diff --cc llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
index a504146,7e37e96..9004d3c
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/ServiceInstance.java
@@@ -53,14 -53,13 +53,20 @@@ public interface ServiceInstance 
     */
    public int getShufflePort();
  
+ 
+   /**
+    * Address for services hosted on http
+    * @return
+    */
+   public String getServicesAddress();
    /**
 +   * OutputFormat endpoint for service instance
 +   *
 +   * @return
 +   */
 +  public int getOutputFormatPort();
 +
 +  /**
     * Return the last known state (without refreshing)
     * 
     * @return
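
With this change the registry interface exposes the daemon's web services address alongside the new output-format port. A minimal illustration of reading the expanded interface (the InstanceDescriber helper below is hypothetical; only the ServiceInstance methods come from the patch):

  import org.apache.hadoop.hive.llap.registry.ServiceInstance;

  // Hypothetical helper: formats the endpoints a registry lookup returns.
  final class InstanceDescriber {
    static String describe(ServiceInstance instance) {
      return instance.getServicesAddress()
          + " (output-format port " + instance.getOutputFormatPort()
          + ", shuffle port " + instance.getShufflePort() + ")";
    }
  }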

http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
----------------------------------------------------------------------
diff --cc llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
index 33ab591,bd814b9..4536a6e
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapFixedRegistryImpl.java
@@@ -52,7 -54,8 +54,9 @@@ public class LlapFixedRegistryImpl impl
    private final int port;
    private final int shuffle;
    private final int mngPort;
+   private final int webPort;
 +  private final int outputFormatPort;
+   private final String webScheme;
    private final String[] hosts;
    private final int memory;
    private final int vcores;
@@@ -66,8 -69,12 +70,13 @@@
      this.shuffle = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_YARN_SHUFFLE_PORT);
      this.resolveHosts = conf.getBoolean(FIXED_REGISTRY_RESOLVE_HOST_NAMES, true);
      this.mngPort = HiveConf.getIntVar(conf, ConfVars.LLAP_MANAGEMENT_RPC_PORT);
 +    this.outputFormatPort = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_OUTPUT_SERVICE_PORT);
  
+ 
+     this.webPort = HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_WEB_PORT);
+     boolean isSsl = HiveConf.getBoolVar(conf, ConfVars.LLAP_DAEMON_WEB_SSL);
+     this.webScheme = isSsl ? "https" : "http";
+ 
      for (Map.Entry<String, String> kv : conf) {
        if (kv.getKey().startsWith(HiveConf.PREFIX_LLAP)
            || kv.getKey().startsWith(HiveConf.PREFIX_HIVE_LLAP)) {
@@@ -153,11 -171,11 +173,16 @@@
      }
  
      @Override
 +    public int getOutputFormatPort() {
 +      return LlapFixedRegistryImpl.this.outputFormatPort;
 +    }
 +
 +    @Override
+     public String getServicesAddress() {
+       return serviceAddress;
+     }
+ 
+     @Override
      public boolean isAlive() {
        return true;
      }

http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
----------------------------------------------------------------------
diff --cc llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
index e49c047,6af30d4..9de4d17
--- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
+++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java
@@@ -246,13 -238,8 +246,13 @@@ public class LlapZookeeperRegistryImpl 
          HiveConf.getIntVar(conf, ConfVars.LLAP_MANAGEMENT_RPC_PORT)));
    }
  
 +  public Endpoint getOutputFormatEndpoint() {
 +    return RegistryTypeUtils.ipcEndpoint(IPC_OUTPUTFORMAT, new InetSocketAddress(hostname,
 +        HiveConf.getIntVar(conf, ConfVars.LLAP_DAEMON_OUTPUT_SERVICE_PORT)));
 +  }
 +
    @Override
-   public void register() throws IOException {
+   public String register() throws IOException {
      ServiceRecord srv = new ServiceRecord();
      Endpoint rpcEndpoint = getRpcEndpoint();
      srv.addInternalEndpoint(rpcEndpoint);
@@@ -323,7 -310,7 +324,8 @@@
      private final int rpcPort;
      private final int mngPort;
      private final int shufflePort;
 +    private final int outputFormatPort;
+     private final String serviceAddress;
  
      public DynamicServiceInstance(ServiceRecord srv) throws IOException {
        this.srv = srv;
@@@ -331,23 -322,22 +337,26 @@@
        final Endpoint shuffle = srv.getInternalEndpoint(IPC_SHUFFLE);
        final Endpoint rpc = srv.getInternalEndpoint(IPC_LLAP);
        final Endpoint mng = srv.getInternalEndpoint(IPC_MNG);
 +      final Endpoint outputFormat = srv.getInternalEndpoint(IPC_OUTPUTFORMAT);
+       final Endpoint services = srv.getExternalEndpoint(IPC_SERVICES);
  
        this.host =
            RegistryTypeUtils.getAddressField(rpc.addresses.get(0),
                AddressTypes.ADDRESS_HOSTNAME_FIELD);
        this.rpcPort =
-           Integer.valueOf(RegistryTypeUtils.getAddressField(rpc.addresses.get(0),
+           Integer.parseInt(RegistryTypeUtils.getAddressField(rpc.addresses.get(0),
                AddressTypes.ADDRESS_PORT_FIELD));
        this.mngPort =
-           Integer.valueOf(RegistryTypeUtils.getAddressField(mng.addresses.get(0),
+           Integer.parseInt(RegistryTypeUtils.getAddressField(mng.addresses.get(0),
                AddressTypes.ADDRESS_PORT_FIELD));
        this.shufflePort =
-           Integer.valueOf(RegistryTypeUtils.getAddressField(shuffle.addresses.get(0),
+           Integer.parseInt(RegistryTypeUtils.getAddressField(shuffle.addresses.get(0),
                AddressTypes.ADDRESS_PORT_FIELD));
 +      this.outputFormatPort =
 +          Integer.valueOf(RegistryTypeUtils.getAddressField(outputFormat.addresses.get(0),
 +              AddressTypes.ADDRESS_PORT_FIELD));
+       this.serviceAddress =
+           RegistryTypeUtils.getAddressField(services.addresses.get(0), AddressTypes.ADDRESS_URI);
      }
  
      @Override
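
Worth noting in the hunk above: the rpc, management, and shuffle ports switch from Integer.valueOf to Integer.parseInt, which returns a primitive int and so avoids allocating a boxed Integer only to unbox it into the int fields; the new outputFormatPort line still uses Integer.valueOf, so both styles coexist after the merge.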

http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/99cb7f96/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
----------------------------------------------------------------------


[14/58] [abbrv] hive git commit: HIVE-13385 : [Cleanup] Streamline Beeline instantiation (Reuben Kuhnert via Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13385 : [Cleanup] Streamline Beeline instantiation (Reuben Kuhnert via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/caafd88f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/caafd88f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/caafd88f

Branch: refs/heads/llap
Commit: caafd88f9b50241ece9fcf5ed173409c3aef0d25
Parents: 3d75544
Author: Reuben Kuhnert <si...@gmail.com>
Authored: Wed Mar 30 06:40:00 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Sat Apr 9 17:29:41 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hive/beeline/BeeLine.java   | 18 ++------------
 .../hive/beeline/BeeLineCommandCompleter.java   | 26 +++++++++++---------
 2 files changed, 16 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/caafd88f/beeline/src/java/org/apache/hive/beeline/BeeLine.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index a4a9558..98d4e09 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -118,7 +118,6 @@ public class BeeLine implements Closeable {
   private boolean exit = false;
   private final DatabaseConnections connections = new DatabaseConnections();
   public static final String COMMAND_PREFIX = "!";
-  private final Completer beeLineCommandCompleter;
   private Collection<Driver> drivers = null;
   private final BeeLineOpts opts = new BeeLineOpts(this, System.getProperties());
   private String lastProgress = null;
@@ -130,7 +129,7 @@ public class BeeLine implements Closeable {
   private PrintStream errorStream = new PrintStream(System.err, true);
   private ConsoleReader consoleReader;
   private List<String> batch = null;
-  private final Reflector reflector;
+  private final Reflector reflector = new Reflector(this);
   private String dbName = null;
   private String currentDatabase = null;
 
@@ -266,6 +265,7 @@ public class BeeLine implements Closeable {
           null)
   };
 
+  private final Completer beeLineCommandCompleter = new BeeLineCommandCompleter(Arrays.asList(commandHandlers));
 
   static final SortedSet<String> KNOWN_DRIVERS = new TreeSet<String>(Arrays.asList(
       new String[] {
@@ -503,21 +503,7 @@ public class BeeLine implements Closeable {
   }
 
   public BeeLine(boolean isBeeLine) {
-    beeLineCommandCompleter = new BeeLineCommandCompleter(BeeLineCommandCompleter.getCompleters
-        (this));
-    reflector = new Reflector(this);
     this.isBeeLine = isBeeLine;
-    // attempt to dynamically load signal handler
-    /* TODO disable signal handler
-    try {
-      Class<?> handlerClass =
-          Class.forName("org.apache.hive.beeline.SunSignalHandler");
-      signalHandler = (BeeLineSignalHandler)
-          handlerClass.newInstance();
-    } catch (Throwable t) {
-      // ignore and leave cancel functionality disabled
-    }
-    */
   }
 
   DatabaseConnection getDatabaseConnection() {

http://git-wip-us.apache.org/repos/asf/hive/blob/caafd88f/beeline/src/java/org/apache/hive/beeline/BeeLineCommandCompleter.java
----------------------------------------------------------------------
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLineCommandCompleter.java b/beeline/src/java/org/apache/hive/beeline/BeeLineCommandCompleter.java
index 6a872bc..87e7518 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLineCommandCompleter.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLineCommandCompleter.java
@@ -27,24 +27,26 @@ import jline.console.completer.NullCompleter;
 import jline.console.completer.StringsCompleter;
 
 class BeeLineCommandCompleter extends AggregateCompleter {
-
-  public BeeLineCommandCompleter(List<Completer> completers) {
-    super(completers);
+  public BeeLineCommandCompleter(Iterable<CommandHandler> handlers) {
+    super(getCompleters(handlers));
   }
 
-  public static List<Completer> getCompleters(BeeLine beeLine){
+  public static List<Completer> getCompleters(Iterable<CommandHandler> handlers){
     List<Completer> completers = new LinkedList<Completer>();
 
-    for (int i = 0; i < beeLine.commandHandlers.length; i++) {
-      String[] cmds = beeLine.commandHandlers[i].getNames();
-      for (int j = 0; cmds != null && j < cmds.length; j++) {
-        List<Completer> compl = new LinkedList<Completer>();
-        compl.add(new StringsCompleter(BeeLine.COMMAND_PREFIX + cmds[j]));
-        compl.addAll(Arrays.asList(beeLine.commandHandlers[i].getParameterCompleters()));
-        compl.add(new NullCompleter()); // last param no complete
-        completers.add(new AggregateCompleter(compl.toArray(new Completer[0])));
+    for (CommandHandler handler : handlers) {
+      String[] commandNames = handler.getNames();
+      if (commandNames != null) {
+        for (String commandName : commandNames) {
+          List<Completer> compl = new LinkedList<Completer>();
+          compl.add(new StringsCompleter(BeeLine.COMMAND_PREFIX + commandName));
+          compl.addAll(Arrays.asList(handler.getParameterCompleters()));
+          compl.add(new NullCompleter()); // last param no complete
+          completers.add(new AggregateCompleter(compl.toArray(new Completer[compl.size()])));
+        }
       }
     }
+
     return completers;
   }
 }
\ No newline at end of file
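
The net effect of the two hunks: BeeLineCommandCompleter is now built from the CommandHandler list rather than from a whole BeeLine instance, so BeeLine can create the completer (and the Reflector) eagerly at field declaration, and its constructor shrinks to recording isBeeLine; the long-commented-out signal-handler loading disappears with it.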


[47/58] [abbrv] hive git commit: HIVE-13400: Following up HIVE-12481, add retry for Zookeeper service discovery (Reviewed by Chaoyu Tang)

Posted by jd...@apache.org.
HIVE-13400: Following up HIVE-12481, add retry for Zookeeper service discovery (Reviewed by Chaoyu Tang)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/418f936c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/418f936c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/418f936c

Branch: refs/heads/llap
Commit: 418f936cff5c344587ea4f58ce08a5cdda931164
Parents: 98699b3
Author: Aihua Xu <ai...@apache.org>
Authored: Fri Apr 1 15:46:22 2016 -0400
Committer: Aihua Xu <ai...@apache.org>
Committed: Thu Apr 14 17:21:12 2016 -0400

----------------------------------------------------------------------
 .../org/apache/hive/jdbc/HiveConnection.java    | 84 +++++++++-----------
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   | 20 +++--
 2 files changed, 51 insertions(+), 53 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/418f936c/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 352744f..40ad3b2 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -18,6 +18,7 @@
 
 package org.apache.hive.jdbc;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hive.jdbc.Utils.JdbcConnectionParams;
 import org.apache.hive.service.auth.HiveAuthFactory;
 import org.apache.hive.service.auth.KerberosSaslHelper;
@@ -109,8 +110,6 @@ public class HiveConnection implements java.sql.Connection {
   private String host;
   private int port;
   private final Map<String, String> sessConfMap;
-  private final Map<String, String> hiveConfMap;
-  private final Map<String, String> hiveVarMap;
   private JdbcConnectionParams connParams;
   private final boolean isEmbeddedMode;
   private TTransport transport;
@@ -141,8 +140,6 @@ public class HiveConnection implements java.sql.Connection {
     host = connParams.getHost();
     port = connParams.getPort();
     sessConfMap = connParams.getSessionVars();
-    hiveConfMap = connParams.getHiveConfs();
-    hiveVarMap = connParams.getHiveVars();
     isEmbeddedMode = connParams.isEmbeddedMode();
 
     if (sessConfMap.containsKey(JdbcConnectionParams.FETCH_SIZE)) {
@@ -177,56 +174,51 @@ public class HiveConnection implements java.sql.Connection {
   }
 
   private void openTransport() throws SQLException {
-    int numRetries = 0;
     int maxRetries = 1;
     try {
-      maxRetries = Integer.parseInt(sessConfMap.get(JdbcConnectionParams.RETRIES));
-    } catch(NumberFormatException e) {
+      String strRetries = sessConfMap.get(JdbcConnectionParams.RETRIES);
+      if (StringUtils.isNotBlank(strRetries)) {
+        maxRetries = Integer.parseInt(strRetries);
+      }
+    } catch(NumberFormatException e) { // Ignore the exception
     }
 
-    while (true) {
-      try {
-        assumeSubject =
-            JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT.equals(sessConfMap
-                .get(JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE));
-        transport = isHttpTransportMode() ? createHttpTransport() : createBinaryTransport();
-        if (!transport.isOpen()) {
-          transport.open();
-          logZkDiscoveryMessage("Connected to " + connParams.getHost() + ":" + connParams.getPort());
-        }
-        break;
-      } catch (TTransportException e) {
-        // We'll retry till we exhaust all HiveServer2 nodes from ZooKeeper
-        if (isZkDynamicDiscoveryMode()) {
-          LOG.info("Failed to connect to " + connParams.getHost() + ":" + connParams.getPort());
-          try {
-            // Update jdbcUriString, host & port variables in connParams
-            // Throw an exception if all HiveServer2 nodes have been exhausted,
-            // or if we're unable to connect to ZooKeeper.
-            Utils.updateConnParamsFromZooKeeper(connParams);
-          } catch (ZooKeeperHiveClientException ze) {
-            throw new SQLException(
-                "Could not open client transport for any of the Server URI's in ZooKeeper: "
-                    + ze.getMessage(), " 08S01", ze);
+    for (int numRetries = 0;;) {
+        try {
+          assumeSubject =
+              JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT.equals(sessConfMap
+                  .get(JdbcConnectionParams.AUTH_KERBEROS_AUTH_TYPE));
+          transport = isHttpTransportMode() ? createHttpTransport() : createBinaryTransport();
+          if (!transport.isOpen()) {
+            transport.open();
+            logZkDiscoveryMessage("Connected to " + connParams.getHost() + ":" + connParams.getPort());
           }
-          // Update with new values
-          jdbcUriString = connParams.getJdbcUriString();
-          host = connParams.getHost();
-          port = connParams.getPort();
-        } else {
-          LOG.info("Transport Used for JDBC connection: " +
-            sessConfMap.get(JdbcConnectionParams.TRANSPORT_MODE));
-
-          // Retry maxRetries times
-          String errMsg = "Could not open client transport with JDBC Uri: " +
-              jdbcUriString + ": " + e.getMessage();
-          if (++numRetries >= maxRetries) {
-            throw new SQLException(errMsg, " 08S01", e);
+          break;
+        } catch (TTransportException e) {
+          LOG.warn("Failed to connect to " + connParams.getHost() + ":" + connParams.getPort());
+          String errMsg = null;
+          String warnMsg = "Could not open client transport with JDBC Uri: " + jdbcUriString + ": ";
+          if (isZkDynamicDiscoveryMode()) {
+            errMsg = "Could not open client transport for any of the Server URI's in ZooKeeper: ";
+            // Try next available server in zookeeper, or retry all the servers again if retry is enabled
+            while(!Utils.updateConnParamsFromZooKeeper(connParams) && ++numRetries < maxRetries) {
+              connParams.getRejectedHostZnodePaths().clear();
+            }
+            // Update with new values
+            jdbcUriString = connParams.getJdbcUriString();
+            host = connParams.getHost();
+            port = connParams.getPort();
           } else {
-            LOG.warn(errMsg + " Retrying " + numRetries + " of " + maxRetries);
+            errMsg = warnMsg;
+            ++numRetries;
+          }
+
+          if (numRetries >= maxRetries) {
+            throw new SQLException(errMsg + e.getMessage(), " 08S01", e);
+          } else {
+            LOG.warn(warnMsg + e.getMessage() + " Retrying " + numRetries + " of " + maxRetries);
           }
         }
-      }
     }
   }
 

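The reworked loop above retries the connection in two modes: with ZooKeeper service discovery it walks the remaining HiveServer2 znodes (and, while retries remain, starts another full pass after clearing the rejected-host list); without discovery it simply retries the same host up to maxRetries times. Below is a minimal, self-contained sketch of that control flow; Transport, ConnParams, openOnce and tryNextServerFromZooKeeper are simplified stand-ins for illustration, not the actual Hive JDBC types.

    import java.sql.SQLException;

    public class RetryLoopSketch {

      // Simplified stand-ins for the driver's transport and connection-parameter types.
      interface Transport { }
      static class ConnParams { String host = "hs2-a"; int port = 10000; }

      // Hypothetical helpers: openOnce() throws on a connect failure;
      // tryNextServerFromZooKeeper() returns false once all znodes are exhausted.
      static Transport openOnce(ConnParams p) throws Exception { throw new Exception("connect failed"); }
      static boolean tryNextServerFromZooKeeper(ConnParams p) { return false; }

      static Transport openWithRetries(ConnParams params, int maxRetries, boolean zkDiscovery)
          throws SQLException {
        for (int numRetries = 0;;) {
          try {
            return openOnce(params);            // success ends the loop
          } catch (Exception e) {
            if (zkDiscovery) {
              // Walk the remaining servers published in ZooKeeper; when they are
              // exhausted, start another full pass only while retries remain.
              while (!tryNextServerFromZooKeeper(params) && ++numRetries < maxRetries) {
                // a real implementation would clear its rejected-server list here
              }
            } else {
              ++numRetries;                     // plain retry against the same host
            }
            if (numRetries >= maxRetries) {
              throw new SQLException("Could not open client transport: " + e.getMessage(), "08S01", e);
            }
          }
        }
      }

      public static void main(String[] args) {
        try {
          openWithRetries(new ConnParams(), 2, true);
        } catch (SQLException e) {
          System.out.println("gave up: " + e.getMessage());
        }
      }
    }

In the stubbed run every connection attempt fails, so the loop gives up once numRetries reaches maxRetries.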
http://git-wip-us.apache.org/repos/asf/hive/blob/418f936c/jdbc/src/java/org/apache/hive/jdbc/Utils.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/Utils.java b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
index 754f89f..42181d7 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/Utils.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
@@ -543,19 +543,25 @@ class Utils {
    * explored. Also update the host, port, jdbcUriString and other configs published by the server.
    *
    * @param connParams
-   * @throws ZooKeeperHiveClientException
+   * @return true if new server info is retrieved successfully
    */
-  static void updateConnParamsFromZooKeeper(JdbcConnectionParams connParams)
-      throws ZooKeeperHiveClientException {
+  static boolean updateConnParamsFromZooKeeper(JdbcConnectionParams connParams) {
     // Add current host to the rejected list
     connParams.getRejectedHostZnodePaths().add(connParams.getCurrentHostZnodePath());
     String oldServerHost = connParams.getHost();
     int oldServerPort = connParams.getPort();
     // Update connection params (including host, port) from ZooKeeper
-    ZooKeeperHiveClientHelper.configureConnParams(connParams);
-    connParams.setJdbcUriString(connParams.getJdbcUriString().replace(
-        oldServerHost + ":" + oldServerPort, connParams.getHost() + ":" + connParams.getPort()));
-    LOG.info("Selected HiveServer2 instance with uri: " + connParams.getJdbcUriString());
+    try {
+      ZooKeeperHiveClientHelper.configureConnParams(connParams);
+      connParams.setJdbcUriString(connParams.getJdbcUriString().replace(
+          oldServerHost + ":" + oldServerPort, connParams.getHost() + ":" + connParams.getPort()));
+      LOG.info("Selected HiveServer2 instance with uri: " + connParams.getJdbcUriString());
+    } catch(ZooKeeperHiveClientException e) {
+      LOG.error(e.getMessage());
+      return false;
+    }
+
+    return true;
   }
 
   private static String joinStringArray(String[] stringArray, String seperator) {

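The Utils change above converts updateConnParamsFromZooKeeper from a method that throws ZooKeeperHiveClientException into one that logs the failure and reports it through a boolean, so the caller in HiveConnection can drive its retry loop on the return value. A hedged sketch of that exception-to-boolean wrapper pattern, using hypothetical stand-in names, is below.

    import java.util.logging.Logger;

    public class BooleanResultSketch {
      private static final Logger LOG = Logger.getLogger(BooleanResultSketch.class.getName());

      // Hypothetical stand-in for ZooKeeperHiveClientException.
      static class DiscoveryException extends Exception {
        DiscoveryException(String msg) { super(msg); }
      }

      // Hypothetical discovery call that may fail, e.g. when no servers are left to try.
      static void configureFromZooKeeper() throws DiscoveryException {
        throw new DiscoveryException("no HiveServer2 uris left to try");
      }

      // The pattern used by the patch: swallow the checked exception, log it,
      // and let the caller decide whether to retry based on the boolean result.
      static boolean updateFromZooKeeper() {
        try {
          configureFromZooKeeper();
          return true;
        } catch (DiscoveryException e) {
          LOG.severe(e.getMessage());
          return false;
        }
      }

      public static void main(String[] args) {
        System.out.println("updated: " + updateFromZooKeeper());
      }
    }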

[09/58] [abbrv] hive git commit: HIVE-12968 : genNotNullFilterForJoinSourcePlan: needs to merge predicates into the multi-AND (Gopal V, Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by jd...@apache.org.
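The .q.out diffs that follow show the effect of HIVE-12968 on EXPLAIN output: conjunctions that were previously printed as left-nested binary ANDs, e.g. ((a and b) and c), now appear as one flat multi-way AND, (a and b and c), and in a few plans conjuncts that become redundant after the merge (for instance is-not-null checks already implied by a range comparison) are dropped. A minimal sketch of flattening nested ANDs over a toy expression tree follows; the Expr, Leaf and And classes are simplified stand-ins for illustration, not Hive's ExprNodeDesc machinery.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class FlattenAndSketch {

      // Toy expression tree: either a leaf predicate or an AND over child expressions.
      interface Expr { }
      static class Leaf implements Expr {
        final String text;
        Leaf(String text) { this.text = text; }
        public String toString() { return text; }
      }
      static class And implements Expr {
        final List<Expr> children;
        And(Expr... children) { this.children = Arrays.asList(children); }
        public String toString() {
          StringBuilder sb = new StringBuilder("(");
          for (int i = 0; i < children.size(); i++) {
            if (i > 0) sb.append(" and ");
            sb.append(children.get(i));
          }
          return sb.append(")").toString();
        }
      }

      // Collect every conjunct of arbitrarily nested ANDs into one flat list (a multi-AND).
      static void collectConjuncts(Expr e, List<Expr> out) {
        if (e instanceof And) {
          for (Expr child : ((And) e).children) {
            collectConjuncts(child, out);
          }
        } else {
          out.add(e);
        }
      }

      public static void main(String[] args) {
        // (((date = '2008-04-08') and (hour = 11)) and ds is not null) and hr is not null
        Expr nested = new And(new And(new And(new Leaf("(date = '2008-04-08')"),
            new Leaf("(hour = 11)")), new Leaf("ds is not null")), new Leaf("hr is not null"));
        List<Expr> conjuncts = new ArrayList<>();
        collectConjuncts(nested, conjuncts);
        System.out.println("nested: " + nested);
        System.out.println("flat  : " + new And(conjuncts.toArray(new Expr[0])));
      }
    }

Running the sketch prints the two renderings, matching the before/after forms of the filterExpr and predicate lines in the diffs below.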
http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
index c5a0aea..fe2049f 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
@@ -1098,10 +1098,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -1242,10 +1242,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -2542,10 +2542,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -4503,10 +4503,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -6004,10 +6004,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 108 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 54 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
index 185e0a5..95da4c5 100644
--- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
@@ -1166,7 +1166,7 @@ STAGE PLANS:
                   alias: x
                   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key is not null and value is not null) and (value < 'zzzzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+                    predicate: ((value < 'zzzzzzzzzz') and (key < 'zzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1226,7 +1226,7 @@ STAGE PLANS:
                   alias: y1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and (value < 'zzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+                    predicate: ((value < 'zzzzzzzz') and (key < 'zzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1241,7 +1241,7 @@ STAGE PLANS:
                   alias: z2
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((value is not null and (key < 'zzzzzzzzzz')) and (value < 'zzzzzzzzzz')) (type: boolean)
+                    predicate: ((key < 'zzzzzzzzzz') and (value < 'zzzzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string)
@@ -1361,7 +1361,7 @@ STAGE PLANS:
                   alias: x
                   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((key is not null and value is not null) and (value < 'zzzzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+                    predicate: ((value < 'zzzzzzzzzz') and (key < 'zzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1423,7 +1423,7 @@ STAGE PLANS:
                   alias: y1
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((key is not null and (value < 'zzzzzzzz')) and (key < 'zzzzzzzz')) (type: boolean)
+                    predicate: ((value < 'zzzzzzzz') and (key < 'zzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
@@ -1438,7 +1438,7 @@ STAGE PLANS:
                   alias: z2
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((value is not null and (key < 'zzzzzzzzzz')) and (value < 'zzzzzzzzzz')) (type: boolean)
+                    predicate: ((key < 'zzzzzzzzzz') and (value < 'zzzzzzzzzz')) (type: boolean)
                     Statistics: Num rows: 222 Data size: 2358 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out b/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out
index 9573718..11882e4 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union_group_by.q.out
@@ -255,7 +255,7 @@ STAGE PLANS:
                   alias: x
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
-                    predicate: (((t is not null and (date >= '2014-03-04')) and (date < '2014-09-03')) and (u <> 0)) (type: boolean)
+                    predicate: (t is not null and (date >= '2014-03-04') and (date < '2014-09-03') and (u <> 0)) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Reduce Output Operator
                       key expressions: t (type: string), st (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index 4edc561..5f52822 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -853,10 +853,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -997,10 +997,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -2297,10 +2297,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -4132,10 +4132,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: ((((date = '2008-04-08') and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: ((date = '2008-04-08') and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)
@@ -5633,10 +5633,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart_date_hour
-                  filterExpr: (((((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                  filterExpr: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                   Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
-                    predicate: (((((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0)) and ds is not null) and hr is not null) (type: boolean)
+                    predicate: (((date = '2008-04-08') or (date = '2008-04-09')) and (UDFToDouble(hour) = 11.0) and ds is not null and hr is not null) (type: boolean)
                     Statistics: Num rows: 2 Data size: 720 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ds (type: string), hr (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/louter_join_ppr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/louter_join_ppr.q.out
index a1d7be2..d994b95 100644
--- a/ql/src/test/results/clientpositive/louter_join_ppr.q.out
+++ b/ql/src/test/results/clientpositive/louter_join_ppr.q.out
@@ -977,7 +977,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0) and (UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)
@@ -998,7 +998,7 @@ STAGE PLANS:
             GatherStats: false
             Filter Operator
               isSamplingPred: false
-              predicate: ((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0) and (UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_1.q.out b/ql/src/test/results/clientpositive/masking_1.q.out
index ba2297e..3b63550 100644
--- a/ql/src/test/results/clientpositive/masking_1.q.out
+++ b/ql/src/test/results/clientpositive/masking_1.q.out
@@ -76,7 +76,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), reverse(value) (type: string)
@@ -123,7 +123,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int)
@@ -170,7 +170,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: reverse(value) (type: string)
@@ -350,7 +350,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), reverse(value) (type: string)
@@ -397,7 +397,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), reverse(value) (type: string)
@@ -444,7 +444,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 0.0) and (UDFToDouble(key) < 10.0)) and ((UDFToDouble(key) % 2.0) = 0.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 0.0) and (UDFToDouble(key) < 10.0) and ((UDFToDouble(key) % 2.0) = 0.0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), upper(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_2.q.out b/ql/src/test/results/clientpositive/masking_2.q.out
index 3feaa24..f998cbd 100644
--- a/ql/src/test/results/clientpositive/masking_2.q.out
+++ b/ql/src/test/results/clientpositive/masking_2.q.out
@@ -76,7 +76,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) and (UDFToInteger(key) > 0)) (type: boolean)
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
@@ -141,7 +141,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) and (UDFToInteger(key) > 0)) and reverse(value) is not null) (type: boolean)
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0) and reverse(value) is not null) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
@@ -192,7 +192,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) and UDFToInteger(key) is not null) (type: boolean)
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and UDFToInteger(key) is not null) (type: boolean)
               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
@@ -208,7 +208,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) and (UDFToInteger(key) > 0)) and reverse(value) is not null) (type: boolean)
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0) and reverse(value) is not null) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
@@ -281,7 +281,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) and (UDFToInteger(key) > 0)) (type: boolean)
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_3.q.out b/ql/src/test/results/clientpositive/masking_3.q.out
index 55c9e8f..1925dce 100644
--- a/ql/src/test/results/clientpositive/masking_3.q.out
+++ b/ql/src/test/results/clientpositive/masking_3.q.out
@@ -7743,7 +7743,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((UDFToDouble(key) > 0.0) and (UDFToDouble(key) < 10.0)) and ((UDFToDouble(key) % 2.0) = 0.0)) (type: boolean)
+              predicate: ((UDFToDouble(key) > 0.0) and (UDFToDouble(key) < 10.0) and ((UDFToDouble(key) % 2.0) = 0.0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), upper(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_4.q.out b/ql/src/test/results/clientpositive/masking_4.q.out
index d2ab52e..7e923e8 100644
--- a/ql/src/test/results/clientpositive/masking_4.q.out
+++ b/ql/src/test/results/clientpositive/masking_4.q.out
@@ -88,7 +88,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key = 5)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key = 5)) (type: boolean)
               Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 5 (type: int), reverse(value) (type: string)
@@ -132,7 +132,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key = 5)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key = 5)) (type: boolean)
               Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 5 (type: int), reverse(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_5.q.out b/ql/src/test/results/clientpositive/masking_5.q.out
index 161ce1c..acb6471 100644
--- a/ql/src/test/results/clientpositive/masking_5.q.out
+++ b/ql/src/test/results/clientpositive/masking_5.q.out
@@ -153,7 +153,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((((hash(key) & 2147483647) % 2) = 0) and ((key % 2) = 0)) and (key < 10)) (type: boolean)
+              predicate: ((((hash(key) & 2147483647) % 2) = 0) and ((key % 2) = 0) and (key < 10)) (type: boolean)
               Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), reverse(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out
index 8a2bc9d..6717527 100644
--- a/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out
+++ b/ql/src/test/results/clientpositive/masking_disablecbo_1.q.out
@@ -76,7 +76,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), reverse(value) (type: string)
@@ -123,7 +123,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int)
@@ -170,7 +170,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: reverse(value) (type: string)
@@ -217,7 +217,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and UDFToDouble(key) is not null) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and UDFToDouble(key) is not null) (type: boolean)
               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), reverse(value) (type: string)
@@ -346,7 +346,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), reverse(value) (type: string)
@@ -393,7 +393,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key > 0)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key > 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), reverse(value) (type: string)
@@ -440,7 +440,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 0) and (key < 10)) and ((key % 2) = 0)) (type: boolean)
+              predicate: ((key > 0) and (key < 10) and ((key % 2) = 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), upper(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
index 57a8fca..48a366e 100644
--- a/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
+++ b/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
-                predicate: ((((_col0 % 2) = 0) and (_col0 < 10)) and (_col0 > 0)) (type: boolean)
+                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean)
                 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), reverse(_col1) (type: string)
@@ -137,7 +137,7 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
-                predicate: ((((_col0 % 2) = 0) and (_col0 < 10)) and (_col0 > 0)) (type: boolean)
+                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean)
                 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), reverse(_col1) (type: string)
@@ -211,7 +211,7 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
-                predicate: ((((_col0 % 2) = 0) and (_col0 < 10)) and UDFToDouble(_col0) is not null) (type: boolean)
+                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and UDFToDouble(_col0) is not null) (type: boolean)
                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), reverse(_col1) (type: string)
@@ -231,7 +231,7 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
-                predicate: ((((_col0 % 2) = 0) and (_col0 < 10)) and (_col0 > 0)) (type: boolean)
+                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean)
                 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), reverse(_col1) (type: string)
@@ -315,7 +315,7 @@ STAGE PLANS:
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
-                predicate: ((((_col0 % 2) = 0) and (_col0 < 10)) and (_col0 > 0)) (type: boolean)
+                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean)
                 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), reverse(_col1) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_disablecbo_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_3.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_3.q.out
index 8826500..6aaab20 100644
--- a/ql/src/test/results/clientpositive/masking_disablecbo_3.q.out
+++ b/ql/src/test/results/clientpositive/masking_disablecbo_3.q.out
@@ -7715,7 +7715,7 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((key > 0) and (key < 10)) and ((key % 2) = 0)) (type: boolean)
+              predicate: ((key > 0) and (key < 10) and ((key % 2) = 0)) (type: boolean)
               Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string), upper(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out
index 8233936..698c797 100644
--- a/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out
+++ b/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out
@@ -88,7 +88,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key = 5)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key = 5)) (type: boolean)
               Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 5 (type: int), reverse(value) (type: string)
@@ -132,7 +132,7 @@ STAGE PLANS:
             alias: masking_test
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((key % 2) = 0) and (key < 10)) and (key = 5)) (type: boolean)
+              predicate: (((key % 2) = 0) and (key < 10) and (key = 5)) (type: boolean)
               Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 5 (type: int), reverse(value) (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/multiMapJoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multiMapJoin1.q.out b/ql/src/test/results/clientpositive/multiMapJoin1.q.out
index cc54cac..312df6e 100644
--- a/ql/src/test/results/clientpositive/multiMapJoin1.q.out
+++ b/ql/src/test/results/clientpositive/multiMapJoin1.q.out
@@ -863,7 +863,7 @@ STAGE PLANS:
             alias: bigtbl
             Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key1 is not null and value is not null) and key2 is not null) (type: boolean)
+              predicate: (key1 is not null and value is not null and key2 is not null) (type: boolean)
               Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key1 (type: string), key2 (type: string), value (type: string)
@@ -1372,7 +1372,7 @@ STAGE PLANS:
             alias: bigtbl
             Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key1 is not null and value is not null) and key2 is not null) (type: boolean)
+              predicate: (key1 is not null and value is not null and key2 is not null) (type: boolean)
               Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key1 (type: string), key2 (type: string), value (type: string)
@@ -1424,7 +1424,7 @@ STAGE PLANS:
             alias: bigtbl
             Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key1 is not null and value is not null) and key2 is not null) (type: boolean)
+              predicate: (key1 is not null and value is not null and key2 is not null) (type: boolean)
               Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key1 (type: string), key2 (type: string), value (type: string)
@@ -1729,7 +1729,7 @@ STAGE PLANS:
             alias: bigtbl
             Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key1 is not null and value is not null) and key2 is not null) (type: boolean)
+              predicate: (key1 is not null and value is not null and key2 is not null) (type: boolean)
               Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key1 (type: string), key2 (type: string), value (type: string)
@@ -2023,7 +2023,7 @@ STAGE PLANS:
             alias: bigtbl
             Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key1 is not null and value is not null) and key2 is not null) (type: boolean)
+              predicate: (key1 is not null and value is not null and key2 is not null) (type: boolean)
               Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key1 (type: string), key2 (type: string), value (type: string)
@@ -2375,7 +2375,7 @@ STAGE PLANS:
             alias: bigtbl
             Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key1 is not null and value is not null) and key2 is not null) (type: boolean)
+              predicate: (key1 is not null and value is not null and key2 is not null) (type: boolean)
               Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key1 (type: string), key2 (type: string), value (type: string)
@@ -2884,7 +2884,7 @@ STAGE PLANS:
             alias: bigtbl
             Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key1 is not null and value is not null) and key2 is not null) (type: boolean)
+              predicate: (key1 is not null and value is not null and key2 is not null) (type: boolean)
               Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key1 (type: string), key2 (type: string), value (type: string)
@@ -2936,7 +2936,7 @@ STAGE PLANS:
             alias: bigtbl
             Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((key1 is not null and value is not null) and key2 is not null) (type: boolean)
+              predicate: (key1 is not null and value is not null and key2 is not null) (type: boolean)
               Statistics: Num rows: 5000 Data size: 72180 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key1 (type: string), key2 (type: string), value (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
index abd3479..7b361b7 100644
--- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out
@@ -460,7 +460,7 @@ STAGE PLANS:
             alias: orc_pred
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((t = -1) and s is not null) and (s like 'bob%')) (type: boolean)
+              predicate: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
               Statistics: Num rows: 262 Data size: 77718 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: -1 (type: tinyint), s (type: string)
@@ -500,10 +500,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: orc_pred
-            filterExpr: (((t = -1) and s is not null) and (s like 'bob%')) (type: boolean)
+            filterExpr: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((t = -1) and s is not null) and (s like 'bob%')) (type: boolean)
+              predicate: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
               Statistics: Num rows: 262 Data size: 77718 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: -1 (type: tinyint), s (type: string)
@@ -591,7 +591,7 @@ STAGE PLANS:
             alias: orc_pred
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((s is not null and (s like 'bob%')) and (not (t) IN (-1, -2, -3))) and t BETWEEN 25 AND 30) (type: boolean)
+              predicate: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
               Statistics: Num rows: 131 Data size: 38859 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), s (type: string)
@@ -644,10 +644,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: orc_pred
-            filterExpr: (((s is not null and (s like 'bob%')) and (not (t) IN (-1, -2, -3))) and t BETWEEN 25 AND 30) (type: boolean)
+            filterExpr: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((s is not null and (s like 'bob%')) and (not (t) IN (-1, -2, -3))) and t BETWEEN 25 AND 30) (type: boolean)
+              predicate: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
               Statistics: Num rows: 131 Data size: 38859 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), s (type: string)
@@ -768,7 +768,7 @@ STAGE PLANS:
             alias: orc_pred
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean)
+              predicate: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
               Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
@@ -834,10 +834,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: orc_pred
-            filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean)
+            filterExpr: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean)
+              predicate: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
               Statistics: Num rows: 5 Data size: 1483 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
@@ -972,7 +972,7 @@ STAGE PLANS:
             alias: orc_pred
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+              predicate: ((t > 10) and (t <> 101) and (d >= 10.0) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
               Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
@@ -1068,10 +1068,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: orc_pred
-            filterExpr: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+            filterExpr: ((t > 10) and (t <> 101) and (d >= 10.0) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
             Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+              predicate: ((t > 10) and (t <> 101) and (d >= 10.0) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
               Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
index 13cf3ea..a9d03fc 100644
--- a/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/parquet_predicate_pushdown.q.out
@@ -448,7 +448,7 @@ STAGE PLANS:
             alias: tbl_pred
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((t = -1) and s is not null) and (s like 'bob%')) (type: boolean)
+              predicate: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
               Statistics: Num rows: 262 Data size: 2882 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: -1 (type: tinyint), s (type: string)
@@ -488,10 +488,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tbl_pred
-            filterExpr: (((t = -1) and s is not null) and (s like 'bob%')) (type: boolean)
+            filterExpr: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((t = -1) and s is not null) and (s like 'bob%')) (type: boolean)
+              predicate: ((t = -1) and s is not null and (s like 'bob%')) (type: boolean)
               Statistics: Num rows: 262 Data size: 2882 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: -1 (type: tinyint), s (type: string)
@@ -579,7 +579,7 @@ STAGE PLANS:
             alias: tbl_pred
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((s is not null and (s like 'bob%')) and (not (t) IN (-1, -2, -3))) and t BETWEEN 25 AND 30) (type: boolean)
+              predicate: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
               Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), s (type: string)
@@ -632,10 +632,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tbl_pred
-            filterExpr: (((s is not null and (s like 'bob%')) and (not (t) IN (-1, -2, -3))) and t BETWEEN 25 AND 30) (type: boolean)
+            filterExpr: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: (((s is not null and (s like 'bob%')) and (not (t) IN (-1, -2, -3))) and t BETWEEN 25 AND 30) (type: boolean)
+              predicate: (s is not null and (s like 'bob%') and (not (t) IN (-1, -2, -3)) and t BETWEEN 25 AND 30) (type: boolean)
               Statistics: Num rows: 131 Data size: 1441 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), s (type: string)
@@ -756,7 +756,7 @@ STAGE PLANS:
             alias: tbl_pred
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean)
+              predicate: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
               Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
@@ -822,10 +822,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tbl_pred
-            filterExpr: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean)
+            filterExpr: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((d >= 10.0) and (d < 12.0)) and (s like '%son')) and (t > 0)) and si BETWEEN 300 AND 400) and (not (s like '%car%'))) (type: boolean)
+              predicate: ((d >= 10.0) and (d < 12.0) and (s like '%son') and (t > 0) and si BETWEEN 300 AND 400 and (not (s like '%car%'))) (type: boolean)
               Statistics: Num rows: 5 Data size: 55 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
@@ -1005,7 +1005,7 @@ STAGE PLANS:
             alias: tbl_pred
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+              predicate: ((t > 10) and (t <> 101) and (d >= 10.0) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
               Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
@@ -1101,10 +1101,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tbl_pred
-            filterExpr: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+            filterExpr: ((t > 10) and (t <> 101) and (d >= 10.0) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((t > 10) and (t <> 101)) and (d >= 10.0)) and (d < 12.0)) and (s like '%son')) and (not (s like '%car%'))) and (t > 0)) and si BETWEEN 300 AND 400) (type: boolean)
+              predicate: ((t > 10) and (t <> 101) and (d >= 10.0) and (d < 12.0) and (s like '%son') and (not (s like '%car%')) and (t > 0) and si BETWEEN 300 AND 400) (type: boolean)
               Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: t (type: tinyint), si (type: smallint), d (type: double), s (type: string)
@@ -1212,10 +1212,10 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: tbl_pred
-            filterExpr: ((((((((((((f < 123.2) and (f > 1.92)) and (f >= 9.99)) and f BETWEEN 1.92 AND 123.2) and (i < 67627)) and (i > 60627)) and (i >= 60626)) and i BETWEEN 60626 AND 67627) and (b < 4294967861)) and (b > 4294967261)) and (b >= 4294967260)) and b BETWEEN 4294967261 AND 4294967861) (type: boolean)
+            filterExpr: ((f < 123.2) and (f > 1.92) and (f >= 9.99) and f BETWEEN 1.92 AND 123.2 and (i < 67627) and (i > 60627) and (i >= 60626) and i BETWEEN 60626 AND 67627 and (b < 4294967861) and (b > 4294967261) and (b >= 4294967260) and b BETWEEN 4294967261 AND 4294967861) (type: boolean)
             Statistics: Num rows: 1049 Data size: 11539 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
-              predicate: ((((((((((((f < 123.2) and (f > 1.92)) and (f >= 9.99)) and f BETWEEN 1.92 AND 123.2) and (i < 67627)) and (i > 60627)) and (i >= 60626)) and i BETWEEN 60626 AND 67627) and (b < 4294967861)) and (b > 4294967261)) and (b >= 4294967260)) and b BETWEEN 4294967261 AND 4294967861) (type: boolean)
+              predicate: ((f < 123.2) and (f > 1.92) and (f >= 9.99) and f BETWEEN 1.92 AND 123.2 and (i < 67627) and (i > 60627) and (i >= 60626) and i BETWEEN 60626 AND 67627 and (b < 4294967861) and (b > 4294967261) and (b >= 4294967260) and b BETWEEN 4294967261 AND 4294967861) (type: boolean)
               Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: f (type: float), i (type: int), b (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query13.q.out b/ql/src/test/results/clientpositive/perf/query13.q.out
index ad50576..bc956e6 100644
--- a/ql/src/test/results/clientpositive/perf/query13.q.out
+++ b/ql/src/test/results/clientpositive/perf/query13.q.out
@@ -154,7 +154,7 @@ Stage-0
                           Select Operator [SEL_25] (rows=10000000 width=1014)
                             Output:["_col0","_col1"]
                             Filter Operator [FIL_67] (rows=10000000 width=1014)
-                              predicate:(((ca_state) IN ('KY', 'GA', 'NM', 'MT', 'OR', 'IN', 'WI', 'MO', 'WV') and (ca_country = 'United States')) and ca_address_sk is not null)
+                              predicate:((ca_state) IN ('KY', 'GA', 'NM', 'MT', 'OR', 'IN', 'WI', 'MO', 'WV') and (ca_country = 'United States') and ca_address_sk is not null)
                               TableScan [TS_23] (rows=40000000 width=1014)
                                 default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state","ca_country"]
                       <-Reducer 4 [SIMPLE_EDGE]
@@ -186,7 +186,7 @@ Stage-0
                                       Select Operator [SEL_8] (rows=19800 width=362)
                                         Output:["_col0","_col1","_col2"]
                                         Filter Operator [FIL_65] (rows=19800 width=362)
-                                          predicate:((((cd_marital_status = 'M') or (cd_marital_status = 'D') or (cd_marital_status = 'U')) and ((cd_education_status = '4 yr Degree') or (cd_education_status = 'Primary') or (cd_education_status = 'Advanced Degree'))) and cd_demo_sk is not null)
+                                          predicate:(((cd_marital_status = 'M') or (cd_marital_status = 'D') or (cd_marital_status = 'U')) and ((cd_education_status = '4 yr Degree') or (cd_education_status = 'Primary') or (cd_education_status = 'Advanced Degree')) and cd_demo_sk is not null)
                                           TableScan [TS_6] (rows=19800 width=362)
                                             default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status","cd_education_status"]
                                   <-Reducer 2 [SIMPLE_EDGE]
@@ -200,7 +200,7 @@ Stage-0
                                           Select Operator [SEL_2] (rows=1 width=0)
                                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
                                             Filter Operator [FIL_63] (rows=1 width=0)
-                                              predicate:(((((((ss_sales_price BETWEEN 100.0 AND 150.0 or ss_sales_price BETWEEN 50.0 AND 100.0 or ss_sales_price BETWEEN 150.0 AND 200.0) and (ss_net_profit BETWEEN 100 AND 200 or ss_net_profit BETWEEN 150 AND 300 or ss_net_profit BETWEEN 50 AND 250)) and ss_store_sk is not null) and ss_cdemo_sk is not null) and ss_hdemo_sk is not null) and ss_addr_sk is not null) and ss_sold_date_sk is not null)
+                                              predicate:((ss_sales_price BETWEEN 100.0 AND 150.0 or ss_sales_price BETWEEN 50.0 AND 100.0 or ss_sales_price BETWEEN 150.0 AND 200.0) and (ss_net_profit BETWEEN 100 AND 200 or ss_net_profit BETWEEN 150 AND 300 or ss_net_profit BETWEEN 50 AND 250) and ss_store_sk is not null and ss_cdemo_sk is not null and ss_hdemo_sk is not null and ss_addr_sk is not null and ss_sold_date_sk is not null)
                                               TableScan [TS_0] (rows=1 width=0)
                                                 default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_cdemo_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_quantity","ss_sales_price","ss_ext_sales_price","ss_ext_wholesale_cost","ss_net_profit"]
                                       <-Map 8 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query15.q.out b/ql/src/test/results/clientpositive/perf/query15.q.out
index e8c06ad..b537e78 100644
--- a/ql/src/test/results/clientpositive/perf/query15.q.out
+++ b/ql/src/test/results/clientpositive/perf/query15.q.out
@@ -40,7 +40,7 @@ Stage-0
                           Select Operator [SEL_19] (rows=18262 width=1119)
                             Output:["_col0"]
                             Filter Operator [FIL_44] (rows=18262 width=1119)
-                              predicate:(((d_qoy = 2) and (d_year = 2000)) and d_date_sk is not null)
+                              predicate:((d_qoy = 2) and (d_year = 2000) and d_date_sk is not null)
                               TableScan [TS_17] (rows=73049 width=1119)
                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                       <-Reducer 3 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query17.q.out b/ql/src/test/results/clientpositive/perf/query17.q.out
index f8805e7..f98ed99 100644
--- a/ql/src/test/results/clientpositive/perf/query17.q.out
+++ b/ql/src/test/results/clientpositive/perf/query17.q.out
@@ -116,7 +116,7 @@ Stage-0
                                                 Select Operator [SEL_8] (rows=1 width=0)
                                                   Output:["_col0","_col1","_col2","_col3"]
                                                   Filter Operator [FIL_92] (rows=1 width=0)
-                                                    predicate:((cs_bill_customer_sk is not null and cs_item_sk is not null) and cs_sold_date_sk is not null)
+                                                    predicate:(cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                                     TableScan [TS_6] (rows=1 width=0)
                                                       default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity"]
                                             <-Reducer 2 [SIMPLE_EDGE]
@@ -130,7 +130,7 @@ Stage-0
                                                     Select Operator [SEL_2] (rows=1 width=0)
                                                       Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                                       Filter Operator [FIL_90] (rows=1 width=0)
-                                                        predicate:((((ss_item_sk is not null and ss_customer_sk is not null) and ss_ticket_number is not null) and ss_sold_date_sk is not null) and ss_store_sk is not null)
+                                                        predicate:(ss_item_sk is not null and ss_customer_sk is not null and ss_ticket_number is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
                                                         TableScan [TS_0] (rows=1 width=0)
                                                           default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_quantity"]
                                                 <-Map 11 [SIMPLE_EDGE]
@@ -139,7 +139,7 @@ Stage-0
                                                     Select Operator [SEL_5] (rows=1 width=0)
                                                       Output:["_col0","_col1","_col2","_col3","_col4"]
                                                       Filter Operator [FIL_91] (rows=1 width=0)
-                                                        predicate:(((sr_item_sk is not null and sr_customer_sk is not null) and sr_ticket_number is not null) and sr_returned_date_sk is not null)
+                                                        predicate:(sr_item_sk is not null and sr_customer_sk is not null and sr_ticket_number is not null and sr_returned_date_sk is not null)
                                                         TableScan [TS_3] (rows=1 width=0)
                                                           default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_return_quantity"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query18.q.out b/ql/src/test/results/clientpositive/perf/query18.q.out
index 276ed57..1014bc1 100644
--- a/ql/src/test/results/clientpositive/perf/query18.q.out
+++ b/ql/src/test/results/clientpositive/perf/query18.q.out
@@ -73,7 +73,7 @@ Stage-0
                                     Select Operator [SEL_14] (rows=40000000 width=860)
                                       Output:["_col0","_col1","_col2","_col4"]
                                       Filter Operator [FIL_80] (rows=40000000 width=860)
-                                        predicate:((((c_birth_month) IN (9, 5, 12, 4, 1, 10) and c_customer_sk is not null) and c_current_addr_sk is not null) and c_current_cdemo_sk is not null)
+                                        predicate:((c_birth_month) IN (9, 5, 12, 4, 1, 10) and c_customer_sk is not null and c_current_addr_sk is not null and c_current_cdemo_sk is not null)
                                         TableScan [TS_12] (rows=80000000 width=860)
                                           default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk","c_birth_month","c_birth_year"]
                                 <-Reducer 4 [SIMPLE_EDGE]
@@ -101,7 +101,7 @@ Stage-0
                                             Select Operator [SEL_8] (rows=4950 width=362)
                                               Output:["_col0","_col3"]
                                               Filter Operator [FIL_78] (rows=4950 width=362)
-                                                predicate:(((cd_gender = 'M') and (cd_education_status = 'College')) and cd_demo_sk is not null)
+                                                predicate:((cd_gender = 'M') and (cd_education_status = 'College') and cd_demo_sk is not null)
                                                 TableScan [TS_6] (rows=19800 width=362)
                                                   default@customer_demographics,cd1,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_education_status","cd_dep_count"]
                                         <-Reducer 2 [SIMPLE_EDGE]
@@ -115,7 +115,7 @@ Stage-0
                                                 Select Operator [SEL_2] (rows=1 width=0)
                                                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
                                                   Filter Operator [FIL_76] (rows=1 width=0)
-                                                    predicate:(((cs_sold_date_sk is not null and cs_bill_cdemo_sk is not null) and cs_item_sk is not null) and cs_bill_customer_sk is not null)
+                                                    predicate:(cs_sold_date_sk is not null and cs_bill_cdemo_sk is not null and cs_item_sk is not null and cs_bill_customer_sk is not null)
                                                     TableScan [TS_0] (rows=1 width=0)
                                                       default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_bill_cdemo_sk","cs_item_sk","cs_quantity","cs_list_price","cs_sales_price","cs_coupon_amt","cs_net_profit"]
                                             <-Map 10 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query19.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query19.q.out b/ql/src/test/results/clientpositive/perf/query19.q.out
index b0fda23..855c91e 100644
--- a/ql/src/test/results/clientpositive/perf/query19.q.out
+++ b/ql/src/test/results/clientpositive/perf/query19.q.out
@@ -100,7 +100,7 @@ Stage-0
                                             Select Operator [SEL_2] (rows=18262 width=1119)
                                               Output:["_col0"]
                                               Filter Operator [FIL_64] (rows=18262 width=1119)
-                                                predicate:(((d_moy = 11) and (d_year = 1999)) and d_date_sk is not null)
+                                                predicate:((d_moy = 11) and (d_year = 1999) and d_date_sk is not null)
                                                 TableScan [TS_0] (rows=73049 width=1119)
                                                   default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                         <-Map 9 [SIMPLE_EDGE]
@@ -109,7 +109,7 @@ Stage-0
                                             Select Operator [SEL_5] (rows=1 width=0)
                                               Output:["_col0","_col1","_col2","_col3","_col4"]
                                               Filter Operator [FIL_65] (rows=1 width=0)
-                                                predicate:(((ss_sold_date_sk is not null and ss_item_sk is not null) and ss_customer_sk is not null) and ss_store_sk is not null)
+                                                predicate:(ss_sold_date_sk is not null and ss_item_sk is not null and ss_customer_sk is not null and ss_store_sk is not null)
                                                 TableScan [TS_3] (rows=1 width=0)
                                                   default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ext_sales_price"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query21.q.out b/ql/src/test/results/clientpositive/perf/query21.q.out
index e6b12d4..1be6627 100644
--- a/ql/src/test/results/clientpositive/perf/query21.q.out
+++ b/ql/src/test/results/clientpositive/perf/query21.q.out
@@ -120,7 +120,7 @@ Stage-0
                                     Select Operator [SEL_2] (rows=1 width=0)
                                       Output:["_col0","_col1","_col2","_col3"]
                                       Filter Operator [FIL_41] (rows=1 width=0)
-                                        predicate:((inv_warehouse_sk is not null and inv_item_sk is not null) and inv_date_sk is not null)
+                                        predicate:(inv_warehouse_sk is not null and inv_item_sk is not null and inv_date_sk is not null)
                                         TableScan [TS_0] (rows=1 width=0)
                                           default@inventory,inventory,Tbl:PARTIAL,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_warehouse_sk","inv_quantity_on_hand"]
                                 <-Map 7 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query22.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query22.q.out b/ql/src/test/results/clientpositive/perf/query22.q.out
index 7ee3423..4729bba 100644
--- a/ql/src/test/results/clientpositive/perf/query22.q.out
+++ b/ql/src/test/results/clientpositive/perf/query22.q.out
@@ -70,7 +70,7 @@ Stage-0
                                     Select Operator [SEL_2] (rows=1 width=0)
                                       Output:["_col0","_col1","_col2","_col3"]
                                       Filter Operator [FIL_40] (rows=1 width=0)
-                                        predicate:((inv_date_sk is not null and inv_item_sk is not null) and inv_warehouse_sk is not null)
+                                        predicate:(inv_date_sk is not null and inv_item_sk is not null and inv_warehouse_sk is not null)
                                         TableScan [TS_0] (rows=1 width=0)
                                           default@inventory,inventory,Tbl:PARTIAL,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_warehouse_sk","inv_quantity_on_hand"]
                                 <-Map 7 [SIMPLE_EDGE]

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query25.q.out b/ql/src/test/results/clientpositive/perf/query25.q.out
index 0157845..f6d1d0d 100644
--- a/ql/src/test/results/clientpositive/perf/query25.q.out
+++ b/ql/src/test/results/clientpositive/perf/query25.q.out
@@ -74,7 +74,7 @@ Stage-0
                                     Select Operator [SEL_17] (rows=18262 width=1119)
                                       Output:["_col0"]
                                       Filter Operator [FIL_94] (rows=18262 width=1119)
-                                        predicate:((d_moy BETWEEN 4 AND 10 and (d_year = 1998)) and d_date_sk is not null)
+                                        predicate:(d_moy BETWEEN 4 AND 10 and (d_year = 1998) and d_date_sk is not null)
                                         TableScan [TS_15] (rows=73049 width=1119)
                                           default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                 <-Reducer 5 [SIMPLE_EDGE]
@@ -88,7 +88,7 @@ Stage-0
                                         Select Operator [SEL_14] (rows=18262 width=1119)
                                           Output:["_col0"]
                                           Filter Operator [FIL_93] (rows=18262 width=1119)
-                                            predicate:((d_moy BETWEEN 4 AND 10 and (d_year = 1998)) and d_date_sk is not null)
+                                            predicate:(d_moy BETWEEN 4 AND 10 and (d_year = 1998) and d_date_sk is not null)
                                             TableScan [TS_12] (rows=73049 width=1119)
                                               default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                     <-Reducer 4 [SIMPLE_EDGE]
@@ -102,7 +102,7 @@ Stage-0
                                             Select Operator [SEL_11] (rows=18262 width=1119)
                                               Output:["_col0"]
                                               Filter Operator [FIL_92] (rows=18262 width=1119)
-                                                predicate:(((d_moy = 4) and (d_year = 1998)) and d_date_sk is not null)
+                                                predicate:((d_moy = 4) and (d_year = 1998) and d_date_sk is not null)
                                                 TableScan [TS_9] (rows=73049 width=1119)
                                                   default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                         <-Reducer 3 [SIMPLE_EDGE]
@@ -116,7 +116,7 @@ Stage-0
                                                 Select Operator [SEL_8] (rows=1 width=0)
                                                   Output:["_col0","_col1","_col2","_col3"]
                                                   Filter Operator [FIL_91] (rows=1 width=0)
-                                                    predicate:((cs_bill_customer_sk is not null and cs_item_sk is not null) and cs_sold_date_sk is not null)
+                                                    predicate:(cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                                     TableScan [TS_6] (rows=1 width=0)
                                                       default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_net_profit"]
                                             <-Reducer 2 [SIMPLE_EDGE]
@@ -130,7 +130,7 @@ Stage-0
                                                     Select Operator [SEL_2] (rows=1 width=0)
                                                       Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                                       Filter Operator [FIL_89] (rows=1 width=0)
-                                                        predicate:((((ss_item_sk is not null and ss_customer_sk is not null) and ss_ticket_number is not null) and ss_sold_date_sk is not null) and ss_store_sk is not null)
+                                                        predicate:(ss_item_sk is not null and ss_customer_sk is not null and ss_ticket_number is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
                                                         TableScan [TS_0] (rows=1 width=0)
                                                           default@store_sales,store_sales,Tbl:PARTIAL,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_net_profit"]
                                                 <-Map 11 [SIMPLE_EDGE]
@@ -139,7 +139,7 @@ Stage-0
                                                     Select Operator [SEL_5] (rows=1 width=0)
                                                       Output:["_col0","_col1","_col2","_col3","_col4"]
                                                       Filter Operator [FIL_90] (rows=1 width=0)
-                                                        predicate:(((sr_item_sk is not null and sr_customer_sk is not null) and sr_ticket_number is not null) and sr_returned_date_sk is not null)
+                                                        predicate:(sr_item_sk is not null and sr_customer_sk is not null and sr_ticket_number is not null and sr_returned_date_sk is not null)
                                                         TableScan [TS_3] (rows=1 width=0)
                                                           default@store_returns,store_returns,Tbl:PARTIAL,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_net_loss"]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/677e5d20/ql/src/test/results/clientpositive/perf/query26.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query26.q.out b/ql/src/test/results/clientpositive/perf/query26.q.out
index 3fadc8f..9471100 100644
--- a/ql/src/test/results/clientpositive/perf/query26.q.out
+++ b/ql/src/test/results/clientpositive/perf/query26.q.out
@@ -83,7 +83,7 @@ Stage-0
                                       Select Operator [SEL_2] (rows=1 width=0)
                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                                         Filter Operator [FIL_50] (rows=1 width=0)
-                                          predicate:(((cs_bill_cdemo_sk is not null and cs_sold_date_sk is not null) and cs_item_sk is not null) and cs_promo_sk is not null)
+                                          predicate:(cs_bill_cdemo_sk is not null and cs_sold_date_sk is not null and cs_item_sk is not null and cs_promo_sk is not null)
                                           TableScan [TS_0] (rows=1 width=0)
                                             default@catalog_sales,catalog_sales,Tbl:PARTIAL,Col:NONE,Output:["cs_sold_date_sk","cs_bill_cdemo_sk","cs_item_sk","cs_promo_sk","cs_quantity","cs_list_price","cs_sales_price","cs_coupon_amt"]
                                   <-Map 8 [SIMPLE_EDGE]
@@ -92,7 +92,7 @@ Stage-0
                                       Select Operator [SEL_5] (rows=2475 width=362)
                                         Output:["_col0"]
                                         Filter Operator [FIL_51] (rows=2475 width=362)
-                                          predicate:((((cd_gender = 'F') and (cd_marital_status = 'W')) and (cd_education_status = 'Primary')) and cd_demo_sk is not null)
+                                          predicate:((cd_gender = 'F') and (cd_marital_status = 'W') and (cd_education_status = 'Primary') and cd_demo_sk is not null)
                                           TableScan [TS_3] (rows=19800 width=362)
                                             default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status"]
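
A side note on the shape of these plans: every predicate and filterExpr hunk above shows the same rewrite. A chain of nested binary ANDs such as (((a and b) and c) and d) is now printed as a single flat multi-child AND, (a and b and c and d). As a rough illustration only (the Expr type and flattenAnd helper below are hypothetical names made up for this note, not Hive classes), the rewrite amounts to collecting the leaves of the AND-tree in order:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  final class AndFlattener {
    // Toy expression node: "AND" nodes carry children, anything else is a leaf.
    static final class Expr {
      final String op;
      final List<Expr> children;
      Expr(String op, Expr... children) {
        this.op = op;
        this.children = Arrays.asList(children);
      }
    }

    // Collects the leaves of an AND-tree left to right, so that
    // (((a and b) and c) and d) yields [a, b, c, d].
    static List<Expr> flattenAnd(Expr e) {
      List<Expr> out = new ArrayList<>();
      if ("AND".equals(e.op)) {
        for (Expr child : e.children) {
          out.addAll(flattenAnd(child));
        }
      } else {
        out.add(e);
      }
      return out;
    }
  }

Printing the collected leaves joined by " and " gives the flattened predicate strings seen in the hunks above; the semantics of each filter are unchanged.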
 


[26/58] [abbrv] hive git commit: HIVE-13183 : Adds MapredLocalTask logs in operation log (Rajat Khandelwal, reviewed by Amareshwari)

Posted by jd...@apache.org.
HIVE-13183 : Adds MapredLocalTask logs in operation log (Rajat Khandelwal, reviewed by Amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/14bcbab1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/14bcbab1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/14bcbab1

Branch: refs/heads/llap
Commit: 14bcbab13a1dd925943ffa860138e89a0521e888
Parents: dc010a3
Author: Rajat Khandelwal <pr...@apache.org>
Authored: Tue Apr 12 17:20:04 2016 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Tue Apr 12 17:20:04 2016 +0530

----------------------------------------------------------------------
 .../apache/hive/common/util/StreamPrinter.java    | 18 +++++++++++-------
 .../apache/hadoop/hive/ql/exec/TaskRunner.java    |  3 +--
 .../apache/hadoop/hive/ql/exec/mr/MapRedTask.java |  5 ++---
 .../hadoop/hive/ql/exec/mr/MapredLocalTask.java   |  9 ++++++---
 .../hadoop/hive/ql/session/OperationLog.java      | 10 +++++++++-
 .../clientpositive/auto_sortmerge_join_8.q.out    |  2 ++
 .../clientpositive/llap/tez_join_hash.q.out       |  4 ++++
 .../clientpositive/tez/tez_join_hash.q.out        |  4 ++++
 8 files changed, 39 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/14bcbab1/common/src/java/org/apache/hive/common/util/StreamPrinter.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hive/common/util/StreamPrinter.java b/common/src/java/org/apache/hive/common/util/StreamPrinter.java
index 72638fd..1517751 100644
--- a/common/src/java/org/apache/hive/common/util/StreamPrinter.java
+++ b/common/src/java/org/apache/hive/common/util/StreamPrinter.java
@@ -33,12 +33,12 @@ import org.apache.hadoop.io.IOUtils;
 public class StreamPrinter extends Thread {
   InputStream is;
   String type;
-  PrintStream os;
+  PrintStream[] outputStreams;
 
-  public StreamPrinter(InputStream is, String type, PrintStream os) {
+  public StreamPrinter(InputStream is, String type, PrintStream... outputStreams) {
     this.is = is;
     this.type = type;
-    this.os = os;
+    this.outputStreams = outputStreams;
   }
 
   @Override
@@ -50,18 +50,22 @@ public class StreamPrinter extends Thread {
       String line = null;
       if (type != null) {
         while ((line = br.readLine()) != null) {
-          os.println(type + ">" + line);
+          for (PrintStream os: outputStreams) {
+            os.println(type + ">" + line);
+          }
         }
       } else {
         while ((line = br.readLine()) != null) {
-          os.println(line);
+          for (PrintStream os: outputStreams) {
+            os.println(line);
+          }
         }
       }
       br.close();
-      br=null;
+      br = null;
     } catch (IOException ioe) {
       ioe.printStackTrace();
-    }finally{
+    } finally {
       IOUtils.closeStream(br);
     }
   }
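
For context, a small usage sketch of the new varargs constructor. The child process and log path here are invented for illustration; only the StreamPrinter API shown in the hunk above is assumed:

  import java.io.FileOutputStream;
  import java.io.PrintStream;

  import org.apache.hive.common.util.StreamPrinter;

  public class StreamPrinterTeeExample {
    public static void main(String[] args) throws Exception {
      Process child = new ProcessBuilder("echo", "hello").start();
      PrintStream fileLog = new PrintStream(new FileOutputStream("/tmp/child.log"));

      // One printer now fans each line of the child's stdout out to every
      // supplied stream instead of writing to a single one.
      StreamPrinter outPrinter =
          new StreamPrinter(child.getInputStream(), null, System.out, fileLog);
      outPrinter.start();

      child.waitFor();
      outPrinter.join();   // StreamPrinter extends Thread; join() waits until EOF
      fileLog.close();
    }
  }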

http://git-wip-us.apache.org/repos/asf/hive/blob/14bcbab1/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
index f6fd081..81f6db0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskRunner.java
@@ -32,7 +32,6 @@ import org.slf4j.LoggerFactory;
  **/
 
 public class TaskRunner extends Thread {
-
   protected Task<? extends Serializable> tsk;
   protected TaskResult result;
   protected SessionState ss;
@@ -103,7 +102,7 @@ public class TaskRunner extends Thread {
       if (tsk.getException() == null) {
         tsk.setException(t);
       }
-      t.printStackTrace();
+      LOG.error("Error in executeTask", t);
     }
     result.setExitVal(exitVal, tsk.getException());
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/14bcbab1/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
index 310356c..a42c2e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
@@ -301,8 +301,7 @@ public class MapRedTask extends ExecDriver implements Serializable {
 
       return exitVal;
     } catch (Exception e) {
-      e.printStackTrace();
-      LOG.error("Exception: " + e.getMessage());
+      LOG.error("Got exception", e);
       return (1);
     } finally {
       try {
@@ -313,7 +312,7 @@ public class MapRedTask extends ExecDriver implements Serializable {
         }
 
       } catch (Exception e) {
-        LOG.error("Exception: " + e.getMessage());
+        LOG.error("Exception: ", e);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/14bcbab1/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
index f5500a4..c81b14c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hive.ql.plan.FetchWork;
 import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
 import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.hadoop.hive.ql.session.OperationLog;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
@@ -317,8 +318,10 @@ public class MapredLocalTask extends Task<MapredLocalWork> implements Serializab
 
       CachingPrintStream errPrintStream = new CachingPrintStream(System.err);
 
-      StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out);
-      StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, errPrintStream);
+      StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, System.out,
+        OperationLog.getCurrentOperationLog().getPrintStream());
+      StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, errPrintStream,
+        OperationLog.getCurrentOperationLog().getPrintStream());
 
       outPrinter.start();
       errPrinter.start();
@@ -340,7 +343,7 @@ public class MapredLocalTask extends Task<MapredLocalWork> implements Serializab
 
       return exitVal;
     } catch (Exception e) {
-      LOG.error("Exception: " + e, e);
+      LOG.error("Exception: ", e);
       return (1);
     } finally {
       if (secureDoAs != null) {
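
The two StreamPrinter lines above now tee the local task's stdout and stderr into the operation log as well as the console. One caveat, flagged as an assumption rather than a fact from this diff: if getCurrentOperationLog() ever returned null (for example in a session with no operation log attached), the calls above would throw a NullPointerException. A null-safe variant, purely as a sketch:

  import java.io.PrintStream;

  import org.apache.hadoop.hive.ql.session.OperationLog;

  final class OperationLogStreams {
    // Returns System.out plus the operation-log stream when one is attached.
    static PrintStream[] stdoutTargets() {
      OperationLog opLog = OperationLog.getCurrentOperationLog();
      return opLog == null
          ? new PrintStream[] { System.out }
          : new PrintStream[] { System.out, opLog.getPrintStream() };
    }
  }

The resulting array can be passed straight to the varargs StreamPrinter constructor.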

http://git-wip-us.apache.org/repos/asf/hive/blob/14bcbab1/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java b/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java
index 2ecdde9..6d0f14a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/OperationLog.java
@@ -39,7 +39,11 @@ public class OperationLog {
   private final LogFile logFile;
   private LoggingLevel opLoggingLevel = LoggingLevel.UNKNOWN;
 
-  public static enum LoggingLevel {
+  public PrintStream getPrintStream() {
+    return logFile.getPrintStream();
+  }
+
+  public enum LoggingLevel {
     NONE, EXECUTION, PERFORMANCE, VERBOSE, UNKNOWN
   }
 
@@ -221,5 +225,9 @@ public class OperationLog {
       }
       return logs;
     }
+
+    public PrintStream getPrintStream() {
+      return out;
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/14bcbab1/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
index 38996a7..d22ba3b 100644
--- a/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_8.q.out
@@ -1507,6 +1507,8 @@ PREHOOK: Input: default@bucket_small
 PREHOOK: Input: default@bucket_small@ds=2008-04-08
 PREHOOK: Input: default@bucket_small@ds=2008-04-09
 #### A masked pattern was here ####
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask
+ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask
 POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@bucket_big

http://git-wip-us.apache.org/repos/asf/hive/blob/14bcbab1/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out b/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out
index 54ca9d2..1fd45aa 100644
--- a/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out
@@ -652,6 +652,10 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask
+ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask
+ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask
 POSTHOOK: query: select key, count(*) from (select x.key as key, y.value as value from
 srcpart x join srcpart y on (x.key = y.key)
 union all

http://git-wip-us.apache.org/repos/asf/hive/blob/14bcbab1/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out b/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out
index 8d0aba1..2f51094 100644
--- a/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out
@@ -638,6 +638,10 @@ PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask
+ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask
+ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask
 POSTHOOK: query: select key, count(*) from (select x.key as key, y.value as value from
 srcpart x join srcpart y on (x.key = y.key)
 union all


[17/58] [abbrv] hive git commit: HIVE-11959 : add simple test case for TestTableIterable (Thejas M Nair via Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-11959 : add simple test case for TestTableIterable (Thejas M Nair via Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/42fa60af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/42fa60af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/42fa60af

Branch: refs/heads/llap
Commit: 42fa60af4af204227a8c69d91084282c7a66dfc8
Parents: 010157e
Author: Thejas Nair <th...@hortonworks.com>
Authored: Sun Apr 10 17:30:38 2016 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Sun Apr 10 17:30:38 2016 -0700

----------------------------------------------------------------------
 .../hive/ql/metadata/TestTableIterable.java     | 67 ++++++++++++++++++++
 1 file changed, 67 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/42fa60af/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestTableIterable.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestTableIterable.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestTableIterable.java
new file mode 100644
index 0000000..f6ebcce
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestTableIterable.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.junit.Test;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.thrift.TException;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+/**
+ * Unit tests for TableIterable
+ */
+public class TestTableIterable  {
+
+  @Test
+  public void testNumReturned() throws MetaException, InvalidOperationException, UnknownDBException, TException {
+    HiveMetaStoreClient msc = mock(HiveMetaStoreClient.class);
+
+
+    // create a mocked metastore client that returns 3 table objects every time it is called
+    // will use same size for TableIterable batch fetch size
+    List<Table> threeTables = Arrays.asList(new Table(), new Table(), new Table());
+    when(msc.getTableObjectsByName(anyString(), anyListOf(String.class))).thenReturn(threeTables);
+
+    List<String> tableNames = Arrays.asList("a", "b", "c", "d", "e", "f");
+    TableIterable tIterable = new TableIterable(msc, "dummy", tableNames, threeTables.size());
+    tIterable.iterator();
+
+    Iterator<Table> tIter = tIterable.iterator();
+    int size = 0;
+    while(tIter.hasNext()) {
+      size++;
+      tIter.next();
+    }
+    assertEquals("Number of table objects returned", size, tableNames.size());
+
+    verify(msc).getTableObjectsByName("dummy", Arrays.asList("a","b","c"));
+    verify(msc).getTableObjectsByName("dummy", Arrays.asList("d","e","f"));
+    
+  }
+}
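
The mock-based test above implies the contract of TableIterable: table names are fetched from the metastore in fixed-size batches, one getTableObjectsByName call per batch. A generic sketch of that batching pattern (an assumption drawn from the test, not TableIterable's actual source; BatchedFetch and Fetcher are made-up names):

  import java.util.ArrayList;
  import java.util.List;

  final class BatchedFetch {
    interface Fetcher<T> {
      List<T> fetch(List<String> names) throws Exception;
    }

    // Walks the names in fixed-size slices and issues one fetch per slice,
    // e.g. ["a".."f"] with batchSize 3 becomes two calls: [a,b,c] and [d,e,f].
    static <T> List<T> fetchAll(List<String> names, int batchSize, Fetcher<T> fetcher)
        throws Exception {
      List<T> out = new ArrayList<>();
      for (int i = 0; i < names.size(); i += batchSize) {
        List<String> batch = names.subList(i, Math.min(i + batchSize, names.size()));
        out.addAll(fetcher.fetch(batch));
      }
      return out;
    }
  }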


[53/58] [abbrv] hive git commit: HIVE-13287: Add logic to estimate stats for IN operator (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13287: Add logic to estimate stats for IN operator (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3fec161d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3fec161d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3fec161d

Branch: refs/heads/llap
Commit: 3fec161dad40860d493dff203f9da3925226bb8e
Parents: 833a7d1
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed Mar 23 21:00:41 2016 +0000
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Apr 15 12:01:26 2016 +0100

----------------------------------------------------------------------
 .../stats/annotation/StatsRulesProcFactory.java | 107 ++++++++++++++++++-
 .../clientpositive/filter_cond_pushdown.q.out   |   6 +-
 .../groupby_multi_single_reducer3.q.out         |   8 +-
 .../llap/dynamic_partition_pruning_2.q.out      |  30 +++---
 ql/src/test/results/clientpositive/pcs.q.out    |   6 +-
 .../results/clientpositive/perf/query17.q.out   |   8 +-
 .../results/clientpositive/perf/query29.q.out   |   8 +-
 .../results/clientpositive/perf/query46.q.out   |  10 +-
 .../results/clientpositive/perf/query89.q.out   |   4 +-
 .../results/clientpositive/pointlookup.q.out    |  12 +--
 .../results/clientpositive/pointlookup2.q.out   |  16 +--
 .../results/clientpositive/pointlookup3.q.out   |   8 +-
 .../spark/groupby_multi_single_reducer3.q.out   |   8 +-
 .../tez/dynamic_partition_pruning_2.q.out       |  30 +++---
 14 files changed, 180 insertions(+), 81 deletions(-)
----------------------------------------------------------------------
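
Before the per-file hunks, a worked example of the kind of estimate this change enables. The formula below illustrates the usual distinct-value heuristic for col IN (v1, ..., vk); it is not a verbatim copy of the Hive code that follows, and InSelectivitySketch with its method names is invented for this note:

  final class InSelectivitySketch {
    // Rows expected to satisfy `col IN (v1..vk)`: scale the row count by the
    // ratio of list size to the column's distinct-value count, capped at 1.
    static long estimateInRows(long numRows, long ndv, int inListSize) {
      if (ndv <= 0) {
        return numRows / 2;   // same default the code falls back to when it bails out
      }
      double selectivity = Math.min(1.0, (double) inListSize / ndv);
      return Math.round(numRows * selectivity);
    }

    public static void main(String[] args) {
      // e.g. 1049 rows, 100 distinct values of t, predicate t IN (-1, -2, -3)
      System.out.println(estimateInRows(1049, 100, 3));   // prints 31
    }
  }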


http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index c4fc5ca..320dc10 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Set;
 import java.util.Stack;
 
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -53,6 +54,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc.ExprNodeDescEqualityWrapper;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDynamicListDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
@@ -64,6 +66,7 @@ import org.apache.hadoop.hive.ql.plan.Statistics;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualNS;
@@ -76,19 +79,24 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFStruct;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 
 public class StatsRulesProcFactory {
 
   private static final Logger LOG = LoggerFactory.getLogger(StatsRulesProcFactory.class.getName());
   private static final boolean isDebugEnabled = LOG.isDebugEnabled();
 
+
   /**
    * Collect basic statistics like number of rows, data size and column level statistics from the
    * table. Also sets the state of the available statistics. Basic and column statistics can have
@@ -299,7 +307,7 @@ public class StatsRulesProcFactory {
 
     private long evaluateExpression(Statistics stats, ExprNodeDesc pred,
         AnnotateStatsProcCtx aspCtx, List<String> neededCols,
-        FilterOperator fop, long evaluatedRowCount) throws CloneNotSupportedException {
+        FilterOperator fop, long evaluatedRowCount) throws CloneNotSupportedException, SemanticException {
       long newNumRows = 0;
       Statistics andStats = null;
 
@@ -338,6 +346,9 @@ public class StatsRulesProcFactory {
               evaluatedRowCount = newNumRows;
             }
           }
+        } else if (udf instanceof GenericUDFIn) {
+          // for IN clause
+          newNumRows = evaluateInExpr(stats, pred, aspCtx, neededCols, fop);
         } else if (udf instanceof GenericUDFOPNot) {
           newNumRows = evaluateNotExpr(stats, pred, aspCtx, neededCols, fop);
         } else if (udf instanceof GenericUDFOPNotNull) {
@@ -375,9 +386,97 @@ public class StatsRulesProcFactory {
       return newNumRows;
     }
 
+    private long evaluateInExpr(Statistics stats, ExprNodeDesc pred, AnnotateStatsProcCtx aspCtx,
+            List<String> neededCols, FilterOperator fop) throws SemanticException {
+
+      long numRows = stats.getNumRows();
+
+      ExprNodeGenericFuncDesc fd = (ExprNodeGenericFuncDesc) pred;
+
+      // 1. It is an IN operator, check if it uses STRUCT
+      List<ExprNodeDesc> children = fd.getChildren();
+      List<ExprNodeDesc> columns = Lists.newArrayList();
+      List<ColStatistics> columnStats = Lists.newArrayList();
+      List<Set<ExprNodeDescEqualityWrapper>> values = Lists.newArrayList();
+      ExprNodeDesc columnsChild = children.get(0);
+      boolean multiColumn;
+      if (columnsChild instanceof ExprNodeGenericFuncDesc &&
+              ((ExprNodeGenericFuncDesc) columnsChild).getGenericUDF() instanceof GenericUDFStruct) {
+        for (int j = 0; j < columnsChild.getChildren().size(); j++) {
+          ExprNodeDesc columnChild = columnsChild.getChildren().get(j);
+          // If the column is not a column reference, we bail out
+          if (!(columnChild instanceof ExprNodeColumnDesc)) {
+            // Default
+            return numRows / 2;
+          }
+          columns.add(columnChild);
+          final String columnName = ((ExprNodeColumnDesc)columnChild).getColumn();
+          // if the column name is not contained in the needed column list, it
+          // is a partition column. We do not need to evaluate partition columns
+          // in the filter expression, since they are handled by the partition pruner
+          if (neededCols != null && !neededCols.contains(columnName)) {
+            // Default
+            return numRows / 2;
+          }
+          columnStats.add(stats.getColumnStatisticsFromColName(columnName));
+          values.add(Sets.<ExprNodeDescEqualityWrapper>newHashSet());
+        }
+        multiColumn = true;
+      } else {
+        // If the column is not a column reference, we bail out
+        if (!(columnsChild instanceof ExprNodeColumnDesc)) {
+          // Default
+          return numRows / 2;
+        }
+        columns.add(columnsChild);
+        final String columnName = ((ExprNodeColumnDesc)columnsChild).getColumn();
+        // if the column name is not contained in the needed column list, it
+        // is a partition column. We do not need to evaluate partition columns
+        // in the filter expression, since they are handled by the partition pruner
+        if (neededCols != null && !neededCols.contains(columnName)) {
+          // Default
+          return numRows / 2;
+        }
+        columnStats.add(stats.getColumnStatisticsFromColName(columnName));
+        values.add(Sets.<ExprNodeDescEqualityWrapper>newHashSet());
+        multiColumn = false;
+      }
+
+      // 2. Extract columns and values
+      for (int i = 1; i < children.size(); i++) {
+        ExprNodeDesc child = children.get(i);
+        // If value is not a constant, we bail out
+        if (!(child instanceof ExprNodeConstantDesc)) {
+          // Default
+          return numRows / 2;
+        }
+        if (multiColumn) {
+          ExprNodeConstantDesc constantChild = (ExprNodeConstantDesc) child;
+          List<?> items = (List<?>) constantChild.getWritableObjectInspector().getWritableConstantValue();
+          List<TypeInfo> structTypes = ((StructTypeInfo) constantChild.getTypeInfo()).getAllStructFieldTypeInfos();
+          for (int j = 0; j < structTypes.size(); j++) {
+            ExprNodeConstantDesc constant = new ExprNodeConstantDesc(structTypes.get(j), items.get(j));
+            values.get(j).add(new ExprNodeDescEqualityWrapper(constant));
+          }
+        } else {
+          values.get(0).add(new ExprNodeDescEqualityWrapper(child));
+        }
+      }
+
+      // 3. Calculate IN selectivity
+      float factor = 1;
+      for (int i = 0; i < columnStats.size(); i++) {
+        long dvs = columnStats.get(i) == null ? 0 : columnStats.get(i).getCountDistint();
+        // (num of distinct values for the column / num of rows) * num of distinct IN-clause values for the column
+        float columnFactor = dvs == 0 ? 0.5f : ((float)dvs / numRows) * values.get(i).size();
+        factor *= columnFactor;
+      }
+      return Math.round( (double)numRows * factor);
+    }
+
     private long evaluateNotExpr(Statistics stats, ExprNodeDesc pred,
         AnnotateStatsProcCtx aspCtx, List<String> neededCols, FilterOperator fop)
-        throws CloneNotSupportedException {
+        throws CloneNotSupportedException, SemanticException {
 
       long numRows = stats.getNumRows();
 
@@ -676,7 +775,7 @@ public class StatsRulesProcFactory {
 
     private long evaluateChildExpr(Statistics stats, ExprNodeDesc child,
         AnnotateStatsProcCtx aspCtx, List<String> neededCols,
-        FilterOperator fop, long evaluatedRowCount) throws CloneNotSupportedException {
+        FilterOperator fop, long evaluatedRowCount) throws CloneNotSupportedException, SemanticException {
 
       long numRows = stats.getNumRows();
 
@@ -761,7 +860,7 @@ public class StatsRulesProcFactory {
         } else if (udf instanceof GenericUDFOPNull) {
           return evaluateColEqualsNullExpr(stats, genFunc);
         } else if (udf instanceof GenericUDFOPAnd || udf instanceof GenericUDFOPOr
-            || udf instanceof GenericUDFOPNot) {
+            || udf instanceof GenericUDFIn || udf instanceof GenericUDFOPNot) {
           return evaluateExpression(stats, genFunc, aspCtx, neededCols, fop, evaluatedRowCount);
         }
       }

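Note on the selectivity arithmetic above: for a multi-column (STRUCT) IN, the patch multiplies a per-column factor of (NDV / numRows) * |distinct IN constants|, falling back to 0.5 per column when column statistics are missing. The standalone sketch below mirrors that arithmetic outside of Hive's operator tree; the class and method names are illustrative only and are not part of the Hive API.

import java.util.Arrays;
import java.util.List;

/** Standalone illustration of the IN-clause row estimate introduced in evaluateInExpr above. */
public class InSelectivitySketch {

  /**
   * @param numRows        row count feeding the filter
   * @param distinctValues NDV per referenced column (0 when column statistics are unavailable)
   * @param inListSizes    number of distinct constants per column in the IN list
   */
  static long estimateInRows(long numRows, List<Long> distinctValues, List<Integer> inListSizes) {
    float factor = 1f;
    for (int i = 0; i < distinctValues.size(); i++) {
      long dvs = distinctValues.get(i);
      // No column statistics: fall back to the generic 0.5 selectivity for this column.
      float columnFactor = dvs == 0 ? 0.5f : ((float) dvs / numRows) * inListSizes.get(i);
      factor *= columnFactor;
    }
    return Math.round((double) numRows * factor);
  }

  public static void main(String[] args) {
    // Single column, 500 rows, 40 distinct values, IN list of 3 constants:
    // (40 / 500) * 3 = 0.24  ->  round(500 * 0.24) = 120 rows.
    System.out.println(estimateInRows(500, Arrays.asList(40L), Arrays.asList(3)));

    // struct(key, value) IN (<11 tuples>) with no column statistics:
    // 0.5 * 0.5 = 0.25  ->  round(500 * 0.25) = 125 rows.
    System.out.println(estimateInRows(500, Arrays.asList(0L, 0L), Arrays.asList(11, 11)));
  }
}
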
http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
index f48a5a4..132b590 100644
--- a/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/filter_cond_pushdown.q.out
@@ -442,14 +442,14 @@ STAGE PLANS:
           Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (((_col1) IN ('2008-04-08', '2008-04-10') and (_col1) IN ('2008-04-08', '2008-04-09') and (_col3 = '2008-04-10')) or (_col3 = '2008-04-08')) (type: boolean)
-            Statistics: Num rows: 343 Data size: 3643 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 344 Data size: 3654 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 343 Data size: 3643 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 344 Data size: 3654 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 343 Data size: 3643 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 344 Data size: 3654 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
index 5362390..c5488de 100644
--- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
@@ -72,7 +72,7 @@ STAGE PLANS:
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((VALUE._col0) IN ('val_100', 'val_200', 'val_300') and (KEY._col0) IN (100, 150, 200)) (type: boolean)
-            Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 63 Data size: 669 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count()
               keys: KEY._col0 (type: string)
@@ -93,7 +93,7 @@ STAGE PLANS:
                       name: default.e1
           Filter Operator
             predicate: ((VALUE._col0) IN ('val_400', 'val_500') and (KEY._col0) IN (400, 450)) (type: boolean)
-            Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 63 Data size: 669 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count()
               keys: KEY._col0 (type: string)
@@ -404,7 +404,7 @@ STAGE PLANS:
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((VALUE._col0) IN ('val_100', 'val_200', 'val_300') and (KEY._col0) IN (100, 150, 200)) (type: boolean)
-            Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 63 Data size: 669 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count()
               keys: KEY._col0 (type: string)
@@ -425,7 +425,7 @@ STAGE PLANS:
                       name: default.e1
           Filter Operator
             predicate: ((VALUE._col0) IN ('val_400', 'val_500') and (KEY._col0) IN (400, 450)) (type: boolean)
-            Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 63 Data size: 669 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count()
               keys: KEY._col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
index 6f93b6a..db3b85d 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
@@ -208,31 +208,31 @@ STAGE PLANS:
                   Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((label) IN ('foo', 'bar') and id is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), label (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
                       Select Operator
                         expressions: _col0 (type: int)
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           keys: _col0 (type: int)
                           mode: hash
                           outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                           Dynamic Partitioning Event Operator
                             Target column: dim_shops_id (int)
                             Target Input: agg
                             Partition key expr: dim_shops_id
-                            Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                            Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                             Target Vertex: Map 1
             Execution mode: llap
             LLAP IO: no inputs
@@ -382,16 +382,16 @@ STAGE PLANS:
                   Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((label) IN ('foo', 'bar') and id is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), label (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
             Execution mode: llap
             LLAP IO: no inputs
@@ -757,31 +757,31 @@ STAGE PLANS:
                   Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((label) IN ('foo', 'bar') and id is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), label (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
                       Select Operator
                         expressions: _col0 (type: int)
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           keys: _col0 (type: int)
                           mode: hash
                           outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                           Dynamic Partitioning Event Operator
                             Target column: dim_shops_id (int)
                             Target Input: agg
                             Partition key expr: dim_shops_id
-                            Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                            Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                             Target Vertex: Map 1
             Execution mode: llap
             LLAP IO: no inputs

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/pcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pcs.q.out b/ql/src/test/results/clientpositive/pcs.q.out
index a1382f1..d6d2431 100644
--- a/ql/src/test/results/clientpositive/pcs.q.out
+++ b/ql/src/test/results/clientpositive/pcs.q.out
@@ -921,17 +921,17 @@ STAGE PLANS:
           Filter Operator
             isSamplingPred: false
             predicate: (struct(_col2,_col0,_col8)) IN (const struct('2000-04-08',1,'2000-04-09'), const struct('2000-04-09',2,'2000-04-08')) (type: boolean)
-            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col2 (type: string), _col6 (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
                 GlobalTableId: 0
 #### A masked pattern was here ####
                 NumFilesPerFileSink: 1
-                Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                 table:
                     input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/perf/query17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query17.q.out b/ql/src/test/results/clientpositive/perf/query17.q.out
index f98ed99..1b5a640 100644
--- a/ql/src/test/results/clientpositive/perf/query17.q.out
+++ b/ql/src/test/results/clientpositive/perf/query17.q.out
@@ -71,9 +71,9 @@ Stage-0
                                 <-Map 15 [SIMPLE_EDGE]
                                   SHUFFLE [RS_37]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_17] (rows=36524 width=1119)
+                                    Select Operator [SEL_17] (rows=36525 width=1119)
                                       Output:["_col0"]
-                                      Filter Operator [FIL_95] (rows=36524 width=1119)
+                                      Filter Operator [FIL_95] (rows=36525 width=1119)
                                         predicate:((d_quarter_name) IN ('2000Q1', '2000Q2', '2000Q3') and d_date_sk is not null)
                                         TableScan [TS_15] (rows=73049 width=1119)
                                           default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_quarter_name"]
@@ -85,9 +85,9 @@ Stage-0
                                     <-Map 14 [SIMPLE_EDGE]
                                       SHUFFLE [RS_34]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_14] (rows=36524 width=1119)
+                                        Select Operator [SEL_14] (rows=36525 width=1119)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_94] (rows=36524 width=1119)
+                                          Filter Operator [FIL_94] (rows=36525 width=1119)
                                             predicate:((d_quarter_name) IN ('2000Q1', '2000Q2', '2000Q3') and d_date_sk is not null)
                                             TableScan [TS_12] (rows=73049 width=1119)
                                               default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_quarter_name"]

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/perf/query29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query29.q.out b/ql/src/test/results/clientpositive/perf/query29.q.out
index 0f4116a..39aca92 100644
--- a/ql/src/test/results/clientpositive/perf/query29.q.out
+++ b/ql/src/test/results/clientpositive/perf/query29.q.out
@@ -52,7 +52,7 @@ Stage-0
                         <-Reducer 7 [SIMPLE_EDGE]
                           SHUFFLE [RS_42]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_102] (rows=44193 width=1119)
+                            Merge Join Operator [MERGEJOIN_102] (rows=44194 width=1119)
                               Conds:RS_39._col3=RS_40._col0(Inner),Output:["_col1","_col5","_col10","_col14","_col24","_col25"]
                             <-Map 16 [SIMPLE_EDGE]
                               SHUFFLE [RS_40]
@@ -66,14 +66,14 @@ Stage-0
                             <-Reducer 6 [SIMPLE_EDGE]
                               SHUFFLE [RS_39]
                                 PartitionCols:_col3
-                                Merge Join Operator [MERGEJOIN_101] (rows=40176 width=1119)
+                                Merge Join Operator [MERGEJOIN_101] (rows=40177 width=1119)
                                   Conds:RS_36._col11=RS_37._col0(Inner),Output:["_col1","_col3","_col5","_col10","_col14"]
                                 <-Map 15 [SIMPLE_EDGE]
                                   SHUFFLE [RS_37]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_17] (rows=36524 width=1119)
+                                    Select Operator [SEL_17] (rows=36525 width=1119)
                                       Output:["_col0"]
-                                      Filter Operator [FIL_94] (rows=36524 width=1119)
+                                      Filter Operator [FIL_94] (rows=36525 width=1119)
                                         predicate:((d_year) IN (2000, 2001, 2002) and d_date_sk is not null)
                                         TableScan [TS_15] (rows=73049 width=1119)
                                           default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/perf/query46.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query46.q.out b/ql/src/test/results/clientpositive/perf/query46.q.out
index 2bd87aa..11804c9 100644
--- a/ql/src/test/results/clientpositive/perf/query46.q.out
+++ b/ql/src/test/results/clientpositive/perf/query46.q.out
@@ -83,7 +83,7 @@ Stage-0
                                     <-Reducer 4 [SIMPLE_EDGE]
                                       SHUFFLE [RS_24]
                                         PartitionCols:_col3
-                                        Merge Join Operator [MERGEJOIN_87] (rows=24305 width=1119)
+                                        Merge Join Operator [MERGEJOIN_87] (rows=24306 width=1119)
                                           Conds:RS_21._col2=RS_22._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col7"]
                                         <-Map 12 [SIMPLE_EDGE]
                                           SHUFFLE [RS_22]
@@ -97,7 +97,7 @@ Stage-0
                                         <-Reducer 3 [SIMPLE_EDGE]
                                           SHUFFLE [RS_21]
                                             PartitionCols:_col2
-                                            Merge Join Operator [MERGEJOIN_86] (rows=22096 width=1119)
+                                            Merge Join Operator [MERGEJOIN_86] (rows=22097 width=1119)
                                               Conds:RS_18._col4=RS_19._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col7"]
                                             <-Map 11 [SIMPLE_EDGE]
                                               SHUFFLE [RS_19]
@@ -111,7 +111,7 @@ Stage-0
                                             <-Reducer 2 [SIMPLE_EDGE]
                                               SHUFFLE [RS_18]
                                                 PartitionCols:_col4
-                                                Merge Join Operator [MERGEJOIN_85] (rows=20088 width=1119)
+                                                Merge Join Operator [MERGEJOIN_85] (rows=20089 width=1119)
                                                   Conds:RS_15._col0=RS_16._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                                                 <-Map 1 [SIMPLE_EDGE]
                                                   SHUFFLE [RS_15]
@@ -125,9 +125,9 @@ Stage-0
                                                 <-Map 10 [SIMPLE_EDGE]
                                                   SHUFFLE [RS_16]
                                                     PartitionCols:_col0
-                                                    Select Operator [SEL_5] (rows=18262 width=1119)
+                                                    Select Operator [SEL_5] (rows=18263 width=1119)
                                                       Output:["_col0"]
-                                                      Filter Operator [FIL_79] (rows=18262 width=1119)
+                                                      Filter Operator [FIL_79] (rows=18263 width=1119)
                                                         predicate:((d_dow) IN (6, 0) and (d_year) IN (1998, 1999, 2000) and d_date_sk is not null)
                                                         TableScan [TS_3] (rows=73049 width=1119)
                                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_dow"]

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/perf/query89.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/query89.q.out b/ql/src/test/results/clientpositive/perf/query89.q.out
index 75f7385..de91d9b 100644
--- a/ql/src/test/results/clientpositive/perf/query89.q.out
+++ b/ql/src/test/results/clientpositive/perf/query89.q.out
@@ -117,9 +117,9 @@ Stage-0
                                         <-Map 9 [SIMPLE_EDGE]
                                           SHUFFLE [RS_16]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_8] (rows=36524 width=1119)
+                                            Select Operator [SEL_8] (rows=36525 width=1119)
                                               Output:["_col0","_col2"]
-                                              Filter Operator [FIL_49] (rows=36524 width=1119)
+                                              Filter Operator [FIL_49] (rows=36525 width=1119)
                                                 predicate:((d_year) IN (2000) and d_date_sk is not null)
                                                 TableScan [TS_6] (rows=73049 width=1119)
                                                   default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/pointlookup.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup.q.out b/ql/src/test/results/clientpositive/pointlookup.q.out
index 460cc74..78dd7bc 100644
--- a/ql/src/test/results/clientpositive/pointlookup.q.out
+++ b/ql/src/test/results/clientpositive/pointlookup.q.out
@@ -111,14 +111,14 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -177,14 +177,14 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (struct(key,value)) IN (const struct('0','8'), const struct('1','5'), const struct('2','6'), const struct('3','8'), const struct('4','1'), const struct('5','6'), const struct('6','1'), const struct('7','1'), const struct('8','1'), const struct('9','1'), const struct('10','3')) (type: boolean)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: string)
                 outputColumnNames: _col0
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

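A sanity check of the new pointlookup estimates above, assuming the evaluateInExpr arithmetic from this patch: with basic stats only (Column stats: NONE) the per-column NDV is unknown, so each of the two columns in struct(key, value) contributes the default 0.5 factor, giving round(500 * 0.5 * 0.5) = 125 rows in place of the earlier generic 500 / 2 = 250 estimate.
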
http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/pointlookup2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup2.q.out b/ql/src/test/results/clientpositive/pointlookup2.q.out
index 869e4cd..6fc6e7f 100644
--- a/ql/src/test/results/clientpositive/pointlookup2.q.out
+++ b/ql/src/test/results/clientpositive/pointlookup2.q.out
@@ -1169,7 +1169,7 @@ STAGE PLANS:
           Filter Operator
             isSamplingPred: false
             predicate: (struct(_col2,_col4)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
-            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
@@ -1197,7 +1197,7 @@ STAGE PLANS:
               key expressions: _col4 (type: int), _col5 (type: string), _col2 (type: string)
               null sort order: aaa
               sort order: +++
-              Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
               tag: -1
               value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
               auto parallelism: false
@@ -1231,13 +1231,13 @@ STAGE PLANS:
         Select Operator
           expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY.reducesinkkey2 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -1590,7 +1590,7 @@ STAGE PLANS:
           Filter Operator
             isSamplingPred: false
             predicate: (struct(_col0,_col3)) IN (const struct(1,'2000-04-08'), const struct(2,'2000-04-09')) (type: boolean)
-            Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
@@ -1618,7 +1618,7 @@ STAGE PLANS:
               key expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string)
               null sort order: aaa
               sort order: +++
-              Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: NONE
               tag: -1
               value expressions: _col2 (type: string), _col4 (type: int), _col5 (type: string)
               auto parallelism: false
@@ -1652,13 +1652,13 @@ STAGE PLANS:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: string), KEY.reducesinkkey2 (type: string), VALUE._col1 (type: int), VALUE._col2 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 16 Data size: 128 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/pointlookup3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/pointlookup3.q.out b/ql/src/test/results/clientpositive/pointlookup3.q.out
index e98ba76..2b25b39 100644
--- a/ql/src/test/results/clientpositive/pointlookup3.q.out
+++ b/ql/src/test/results/clientpositive/pointlookup3.q.out
@@ -1337,7 +1337,7 @@ STAGE PLANS:
           Filter Operator
             isSamplingPred: false
             predicate: (struct(_col2,_col4)) IN (const struct('2000-04-08',1), const struct('2000-04-09',2)) (type: boolean)
-            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               GlobalTableId: 0
@@ -1365,7 +1365,7 @@ STAGE PLANS:
               key expressions: _col4 (type: int), _col5 (type: string), _col2 (type: string)
               null sort order: aaa
               sort order: +++
-              Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
               tag: -1
               value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string), _col6 (type: string), _col7 (type: string)
               auto parallelism: false
@@ -1399,13 +1399,13 @@ STAGE PLANS:
         Select Operator
           expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY.reducesinkkey2 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string), VALUE._col3 (type: string), VALUE._col4 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
-          Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+          Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 0
 #### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE
+            Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
index 7bb3ff2..982d719 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
@@ -78,7 +78,7 @@ STAGE PLANS:
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: ((VALUE._col0) IN ('val_100', 'val_200', 'val_300') and (KEY._col0) IN (100, 150, 200)) (type: boolean)
-                  Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 63 Data size: 669 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     keys: KEY._col0 (type: string)
@@ -99,7 +99,7 @@ STAGE PLANS:
                             name: default.e1
                 Filter Operator
                   predicate: ((VALUE._col0) IN ('val_400', 'val_500') and (KEY._col0) IN (400, 450)) (type: boolean)
-                  Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 63 Data size: 669 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     keys: KEY._col0 (type: string)
@@ -422,7 +422,7 @@ STAGE PLANS:
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: ((VALUE._col0) IN ('val_100', 'val_200', 'val_300') and (KEY._col0) IN (100, 150, 200)) (type: boolean)
-                  Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 63 Data size: 669 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     keys: KEY._col0 (type: string)
@@ -443,7 +443,7 @@ STAGE PLANS:
                             name: default.e1
                 Filter Operator
                   predicate: ((VALUE._col0) IN ('val_400', 'val_500') and (KEY._col0) IN (400, 450)) (type: boolean)
-                  Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 63 Data size: 669 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     keys: KEY._col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/3fec161d/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
index e129795..71b7ee3 100644
--- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
@@ -206,31 +206,31 @@ STAGE PLANS:
                   Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((label) IN ('foo', 'bar') and id is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), label (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
                       Select Operator
                         expressions: _col0 (type: int)
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           keys: _col0 (type: int)
                           mode: hash
                           outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                           Dynamic Partitioning Event Operator
                             Target column: dim_shops_id (int)
                             Target Input: agg
                             Partition key expr: dim_shops_id
-                            Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                            Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                             Target Vertex: Map 1
         Reducer 2 
             Reduce Operator Tree:
@@ -374,16 +374,16 @@ STAGE PLANS:
                   Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((label) IN ('foo', 'bar') and id is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), label (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -735,31 +735,31 @@ STAGE PLANS:
                   Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((label) IN ('foo', 'bar') and id is not null) (type: boolean)
-                    Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: id (type: int), label (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
                       Select Operator
                         expressions: _col0 (type: int)
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           keys: _col0 (type: int)
                           mode: hash
                           outputColumnNames: _col0
-                          Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                          Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                           Dynamic Partitioning Event Operator
                             Target column: dim_shops_id (int)
                             Target Input: agg
                             Partition key expr: dim_shops_id
-                            Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
+                            Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                             Target Vertex: Map 1
         Reducer 2 
             Reduce Operator Tree:


[52/58] [abbrv] hive git commit: HIVE-13418 : HiveServer2 HTTP mode should support X-Forwarded-Host header for authorization/audits (Thejas Nair, reviewed by Vaibhav Gumashta)

Posted by jd...@apache.org.
HIVE-13418 : HiveServer2 HTTP mode should support X-Forwarded-Host header for authorization/audits (Thejas Nair, reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/833a7d15
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/833a7d15
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/833a7d15

Branch: refs/heads/llap
Commit: 833a7d158b3a8e45f492e7c82640c1a367d79b30
Parents: cc2d0f0
Author: Thejas Nair <th...@hortonworks.com>
Authored: Thu Apr 14 17:43:18 2016 -0700
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Thu Apr 14 17:43:18 2016 -0700

----------------------------------------------------------------------
 .../cli/thrift/TestThriftBinaryCLIService.java  |  92 -------
 .../cli/thrift/TestThriftHttpCLIService.java    | 241 -------------------
 .../java/org/apache/hadoop/hive/ql/Driver.java  |   1 +
 .../hadoop/hive/ql/processors/CommandUtil.java  |   1 +
 .../AuthorizationMetaStoreFilterHook.java       |   1 +
 .../authorization/plugin/QueryContext.java      |  19 +-
 .../hadoop/hive/ql/session/SessionState.java    |  10 +
 .../cli/operation/MetadataOperation.java        |   1 +
 .../service/cli/session/HiveSessionImpl.java    |   2 +
 .../service/cli/session/SessionManager.java     |  21 +-
 .../service/cli/thrift/ThriftHttpServlet.java   |  17 ++
 .../cli/thrift/ThriftCLIServiceTest.java        |   4 +-
 12 files changed, 68 insertions(+), 342 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftBinaryCLIService.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftBinaryCLIService.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftBinaryCLIService.java
deleted file mode 100644
index de31699..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftBinaryCLIService.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.service.cli.thrift;
-
-import static org.junit.Assert.assertNotNull;
-
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hive.service.auth.HiveAuthFactory.AuthTypes;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-
-
-/**
- *
- * TestThriftBinaryCLIService.
- * This tests ThriftCLIService started in binary mode.
- *
- */
-
-public class TestThriftBinaryCLIService extends ThriftCLIServiceTest {
-
-  private static String transportMode = "binary";
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    // Set up the base class
-    ThriftCLIServiceTest.setUpBeforeClass();
-
-    assertNotNull(port);
-    assertNotNull(hiveServer2);
-    assertNotNull(hiveConf);
-
-    hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
-    hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, host);
-    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, port);
-    hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, AuthTypes.NONE.toString());
-    hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, transportMode);
-
-    startHiveServer2WithConf(hiveConf);
-
-    client = getServiceClientInternal();
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    ThriftCLIServiceTest.tearDownAfterClass();
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @Override
-  @Before
-  public void setUp() throws Exception {
-
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @Override
-  @After
-  public void tearDown() throws Exception {
-
-  }
-
-
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java
deleted file mode 100644
index 3ed6dd8..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIService.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hive.service.cli.thrift;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hive.jdbc.HttpBasicAuthInterceptor;
-import org.apache.hive.service.auth.HiveAuthFactory;
-import org.apache.hive.service.auth.HiveAuthFactory.AuthTypes;
-import org.apache.hive.service.rpc.thrift.TCLIService;
-import org.apache.hive.service.rpc.thrift.TOpenSessionReq;
-import org.apache.http.HttpException;
-import org.apache.http.HttpRequest;
-import org.apache.http.client.CookieStore;
-import org.apache.http.impl.client.DefaultHttpClient;
-import org.apache.http.protocol.HttpContext;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.transport.THttpClient;
-import org.apache.thrift.transport.TTransport;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- *
- * TestThriftHttpCLIService.
- * This tests ThriftCLIService started in http mode.
- *
- */
-
-public class TestThriftHttpCLIService extends ThriftCLIServiceTest {
-
-  private static String transportMode = "http";
-  private static String thriftHttpPath = "cliservice";
-
-  /**
-   *  HttpBasicAuthInterceptorWithLogging
-   *  This adds httpRequestHeaders to the BasicAuthInterceptor
-   */
-  public class HttpBasicAuthInterceptorWithLogging extends HttpBasicAuthInterceptor {
-
-   ArrayList<String> requestHeaders;
-
-   public HttpBasicAuthInterceptorWithLogging(String username,
-      String password, CookieStore cookieStore, String cn, boolean isSSL,
-      Map<String, String> additionalHeaders) {
-      super(username, password, cookieStore, cn, isSSL, additionalHeaders);
-      requestHeaders = new ArrayList<String>();
-    }
-
-    @Override
-    public void process(HttpRequest httpRequest, HttpContext httpContext)
-      throws HttpException, IOException {
-      super.process(httpRequest, httpContext);
-
-      String currHeaders = "";
-
-      for (org.apache.http.Header h : httpRequest.getAllHeaders()) {
-        currHeaders += h.getName() + ":" + h.getValue() + " ";
-      }
-      requestHeaders.add(currHeaders);
-    }
-
-    public ArrayList<String>  getRequestHeaders() {
-      return requestHeaders;
-    }
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    // Set up the base class
-    ThriftCLIServiceTest.setUpBeforeClass();
-
-    assertNotNull(port);
-    assertNotNull(hiveServer2);
-    assertNotNull(hiveConf);
-
-    hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
-    hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, host);
-    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT, port);
-    hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, AuthTypes.NOSASL.toString());
-    hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, transportMode);
-    hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH, thriftHttpPath);
-
-    startHiveServer2WithConf(hiveConf);
-
-    client = getServiceClientInternal();
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    ThriftCLIServiceTest.tearDownAfterClass();
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @Override
-  @Before
-  public void setUp() throws Exception {
-
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @Override
-  @After
-  public void tearDown() throws Exception {
-
-  }
-
-  @Test
-  /**
-   * Tests calls from a raw (NOSASL) binary client,
-   * to a HiveServer2 running in http mode.
-   * This should throw an expected exception due to incompatibility.
-   * @throws Exception
-   */
-  public void testBinaryClientHttpServer() throws Exception {
-    TTransport transport = getRawBinaryTransport();
-    TCLIService.Client rawBinaryClient = getClient(transport);
-
-    // This will throw an expected exception since client-server modes are incompatible
-    testOpenSessionExpectedException(rawBinaryClient);
-  }
-
-  /**
-   * Configure a wrong service endpoint for the client transport,
-   * and test for error.
-   * @throws Exception
-   */
-  @Test
-  public void testIncorrectHttpPath() throws Exception {
-    thriftHttpPath = "wrongPath";
-    TTransport transport = getHttpTransport();
-    TCLIService.Client httpClient = getClient(transport);
-
-    // This will throw an expected exception since
-    // client is communicating with the wrong http service endpoint
-    testOpenSessionExpectedException(httpClient);
-
-    // Reset to correct http path
-    thriftHttpPath = "cliservice";
-  }
-
-  private void testOpenSessionExpectedException(TCLIService.Client client) {
-    boolean caughtEx = false;
-    // Create a new open session request object
-    TOpenSessionReq openReq = new TOpenSessionReq();
-    try {
-      client.OpenSession(openReq).getSessionHandle();
-    } catch (Exception e) {
-      caughtEx = true;
-      System.out.println("Exception expected: " + e.toString());
-    }
-    assertTrue("Exception expected", caughtEx);
-  }
-
-  private TCLIService.Client getClient(TTransport transport) throws Exception {
-    // Create the corresponding client
-    TProtocol protocol = new TBinaryProtocol(transport);
-    return new TCLIService.Client(protocol);
-  }
-
-  private TTransport getRawBinaryTransport() throws Exception {
-    return HiveAuthFactory.getSocketTransport(host, port, 0);
-  }
-
-  private static TTransport getHttpTransport() throws Exception {
-    DefaultHttpClient httpClient = new DefaultHttpClient();
-    String httpUrl = transportMode + "://" + host + ":" + port +
-        "/" + thriftHttpPath + "/";
-    httpClient.addRequestInterceptor(
-				     new HttpBasicAuthInterceptor(USERNAME, PASSWORD, null, null, false, null));
-    return new THttpClient(httpUrl, httpClient);
-  }
-
-  /**
-   * Test additional http headers passed to request interceptor.
-   * @throws Exception
-   */
-  @Test
-  public void testAdditionalHttpHeaders() throws Exception {
-    TTransport transport;
-    DefaultHttpClient hClient = new DefaultHttpClient();
-    String httpUrl = transportMode + "://" + host + ":" + port +
-        "/" + thriftHttpPath + "/";
-    Map<String, String> additionalHeaders = new HashMap<String, String>();
-    additionalHeaders.put("key1", "value1");
-    additionalHeaders.put("key2", "value2");
-    HttpBasicAuthInterceptorWithLogging authInt =
-      new HttpBasicAuthInterceptorWithLogging(USERNAME, PASSWORD, null, null,
-      false, additionalHeaders);
-    hClient.addRequestInterceptor(authInt);
-    transport = new THttpClient(httpUrl, hClient);
-    TCLIService.Client httpClient = getClient(transport);
-
-    // Create a new open session request object
-    TOpenSessionReq openReq = new TOpenSessionReq();
-    httpClient.OpenSession(openReq).getSessionHandle();
-    ArrayList<String> headers = authInt.getRequestHeaders();
-
-    for (String h : headers) {
-      assertTrue(h.contains("key1:value1"));
-      assertTrue(h.contains("key2:value2"));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index bd510d6..92c2c76 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -848,6 +848,7 @@ public class Driver implements CommandProcessor {
      */
 
     QueryContext.Builder authzContextBuilder = new QueryContext.Builder();
+    authzContextBuilder.setForwardedAddresses(ss.getForwardedAddresses());
     authzContextBuilder.setCommandString(command);
 
     HiveOperationType hiveOpType = getHiveOperationType(op);

http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java
index 7971dab..9288ee2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/CommandUtil.java
@@ -82,6 +82,7 @@ class CommandUtil {
     HivePrivilegeObject commandObj = HivePrivilegeObject.createHivePrivilegeObject(command);
     QueryContext.Builder ctxBuilder = new QueryContext.Builder();
     ctxBuilder.setCommandString(Joiner.on(' ').join(command));
+    ctxBuilder.setForwardedAddresses(ss.getForwardedAddresses());
     ss.getAuthorizerV2().checkPrivileges(type, Arrays.asList(commandObj), null, ctxBuilder.build());
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java
index a9ad015..20367da 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java
@@ -74,6 +74,7 @@ public class AuthorizationMetaStoreFilterHook extends DefaultMetaStoreFilterHook
   private List<HivePrivilegeObject> getFilteredObjects(List<HivePrivilegeObject> listObjs) throws MetaException {
     SessionState ss = SessionState.get();
     QueryContext.Builder authzContextBuilder = new QueryContext.Builder();
+    authzContextBuilder.setForwardedAddresses(ss.getForwardedAddresses());
     try {
       return ss.getAuthorizerV2().filterListCmdObjects(listObjs, authzContextBuilder.build());
     } catch (HiveAuthzPluginException e) {

http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/QueryContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/QueryContext.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/QueryContext.java
index 318343c..17f8913 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/QueryContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/QueryContext.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hive.ql.security.authorization.plugin;
 
+import java.util.List;
+
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving;
 
@@ -31,6 +33,7 @@ public final class QueryContext {
 
   public static class Builder {
     private String commandString;
+    private List<String> forwardedAddresses;
 
     public String getCommandString() {
       return commandString;
@@ -38,24 +41,38 @@ public final class QueryContext {
     public void setCommandString(String commandString) {
       this.commandString = commandString;
     }
+
+    public List<String> getForwardedAddresses() {
+      return forwardedAddresses;
+    }
+    public void setForwardedAddresses(List<String> forwardedAddresses) {
+      this.forwardedAddresses = forwardedAddresses;
+    }
+
     public QueryContext build(){
       return new QueryContext(this);
     }
   }
 
   private final String commandString;
+  private final List<String> forwardedAddresses;
 
   private QueryContext(Builder builder) {
     this.commandString = builder.commandString;
+    this.forwardedAddresses = builder.forwardedAddresses;
   }
 
   public String getCommandString() {
     return commandString;
   }
 
+  public List<String> getForwardedAddresses() {
+    return forwardedAddresses;
+  }
+
   @Override
   public String toString() {
-    return "QueryContext [commandString=" + commandString + "]";
+    return "QueryContext [commandString=" + commandString + ", forwardedAddresses=" + forwardedAddresses + "]";
   }
 
 }

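For context, QueryContext.Builder now carries the forwarded proxy addresses alongside the command string. Below is a minimal usage sketch, not part of the patch (the class name, helper method, and literal command are illustrative), showing how an authorization call site assembles the enriched context from the current SessionState, mirroring what the Driver, CommandUtil, and MetadataOperation changes in this patch do:

import org.apache.hadoop.hive.ql.security.authorization.plugin.QueryContext;
import org.apache.hadoop.hive.ql.session.SessionState;

public class QueryContextUsageSketch {
  // Hypothetical helper; mirrors the builder flow used at the real call sites.
  static QueryContext buildAuthzContext(String command) {
    SessionState ss = SessionState.get();
    QueryContext.Builder builder = new QueryContext.Builder();
    builder.setCommandString(command);
    // May be null (binary transport, never set) or empty (HTTP request with no
    // X-Forwarded-For header); the authorizer should handle both cases.
    builder.setForwardedAddresses(ss.getForwardedAddresses());
    return builder.build();
  }
}
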
http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index ca18247..2b15c23 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -275,6 +275,8 @@ public class SessionState {
 
   private final ResourceDownloader resourceDownloader;
 
+  private List<String> forwardedAddresses;
+
   /**
    * Get the lineage state stored in this session.
    *
@@ -1677,6 +1679,14 @@ public class SessionState {
   public ResourceDownloader getResourceDownloader() {
     return resourceDownloader;
   }
+
+  public void setForwardedAddresses(List<String> forwardedAddresses) {
+    this.forwardedAddresses = forwardedAddresses;
+  }
+
+  public List<String> getForwardedAddresses() {
+    return forwardedAddresses;
+  }
 }
 
 class ResourceMaps {

http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java b/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
index c4a7e69..77228fa 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/MetadataOperation.java
@@ -135,6 +135,7 @@ public abstract class MetadataOperation extends Operation {
       String cmdString) throws HiveSQLException {
     SessionState ss = SessionState.get();
     QueryContext.Builder ctxBuilder = new QueryContext.Builder();
+    ctxBuilder.setForwardedAddresses(ss.getForwardedAddresses());
     ctxBuilder.setCommandString(cmdString);
     try {
       ss.getAuthorizerV2().checkPrivileges(opType, inpObjs, null,

http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index 80a1844..0f36cd6 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -159,6 +159,7 @@ public class HiveSessionImpl implements HiveSession {
     sessionState = new SessionState(hiveConf, username);
     sessionState.setUserIpAddress(ipAddress);
     sessionState.setIsHiveServerQuery(true);
+    sessionState.setForwardedAddresses(SessionManager.getForwardedAddresses());
     SessionState.start(sessionState);
     try {
       sessionState.reloadAuxJars();
@@ -326,6 +327,7 @@ public class HiveSessionImpl implements HiveSession {
     // Need to make sure that the this HiveServer2's session's SessionState is
     // stored in the thread local for the handler thread.
     SessionState.setCurrentSessionState(sessionState);
+    sessionState.setForwardedAddresses(SessionManager.getForwardedAddresses());
     if (userAccess) {
       lastAccessTime = System.currentTimeMillis();
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
index 79eddd9..ad8678e 100644
--- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
+++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java
@@ -395,12 +395,7 @@ public class SessionManager extends CompositeService {
     return operationManager;
   }
 
-  private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>() {
-    @Override
-    protected String initialValue() {
-      return null;
-    }
-  };
+  private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>();
 
   public static void setIpAddress(String ipAddress) {
     threadLocalIpAddress.set(ipAddress);
@@ -414,6 +409,20 @@ public class SessionManager extends CompositeService {
     return threadLocalIpAddress.get();
   }
 
+  private static ThreadLocal<List<String>> threadLocalForwardedAddresses = new ThreadLocal<List<String>>();
+
+  public static void setForwardedAddresses(List<String> ipAddress) {
+    threadLocalForwardedAddresses.set(ipAddress);
+  }
+
+  public static void clearForwardedAddresses() {
+    threadLocalForwardedAddresses.remove();
+  }
+
+  public static List<String> getForwardedAddresses() {
+    return threadLocalForwardedAddresses.get();
+  }
+
   private static ThreadLocal<String> threadLocalUserName = new ThreadLocal<String>(){
     @Override
     protected String initialValue() {

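The new thread-local follows the same pattern as the existing ipAddress and userName ones: set per request, read while the session is being set up, cleared before the pooled worker thread is reused. A minimal sketch of that discipline, with a hypothetical handler class and method (the real call sites are the ThriftHttpServlet and HiveSessionImpl changes in this patch):

import java.util.Arrays;
import java.util.List;

import org.apache.hive.service.cli.session.SessionManager;

public class ForwardedAddressLifecycleSketch {
  // Hypothetical request handler illustrating the set / read / clear pattern
  // the static thread-locals rely on.
  void handle(String forwardedForHeader) {
    try {
      SessionManager.setForwardedAddresses(
          Arrays.asList(forwardedForHeader.split(",")));
      List<String> addresses = SessionManager.getForwardedAddresses();
      // ... propagate addresses into the SessionState / QueryContext ...
    } finally {
      // Worker threads are pooled, so stale values must be cleared explicitly.
      SessionManager.clearForwardedAddresses();
    }
  }
}
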
http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java
index 7e12fae..74d73b7 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java
@@ -20,7 +20,11 @@ package org.apache.hive.service.cli.thrift;
 
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
+import java.net.InetAddress;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
@@ -87,6 +91,7 @@ public class ThriftHttpServlet extends TServlet {
   private boolean isHttpOnlyCookie;
   private final HiveAuthFactory hiveAuthFactory;
   private static final String HIVE_DELEGATION_TOKEN_HEADER =  "X-Hive-Delegation-Token";
+  private static final String X_FORWARDED_FOR = "X-Forwarded-For";
 
   public ThriftHttpServlet(TProcessor processor, TProtocolFactory protocolFactory,
       String authType, UserGroupInformation serviceUGI, UserGroupInformation httpUGI,
@@ -166,6 +171,17 @@ public class ThriftHttpServlet extends TServlet {
       LOG.debug("Client IP Address: " + clientIpAddress);
       // Set the thread local ip address
       SessionManager.setIpAddress(clientIpAddress);
+
+      // Capture the addresses of any forwarding proxies (comma-separated header)
+      String forwardedFor = request.getHeader(X_FORWARDED_FOR);
+      if (forwardedFor != null) {
+        LOG.debug("{}:{}", X_FORWARDED_FOR, forwardedFor);
+        List<String> forwardedAddresses = Arrays.asList(forwardedFor.split(","));
+        SessionManager.setForwardedAddresses(forwardedAddresses);
+      } else {
+        SessionManager.setForwardedAddresses(Collections.<String>emptyList());
+      }
+
       // Generate new cookie and add it to the response
       if (requireNewCookie &&
           !authType.equalsIgnoreCase(HiveAuthFactory.AuthTypes.NOSASL.toString())) {
@@ -195,6 +211,7 @@ public class ThriftHttpServlet extends TServlet {
       SessionManager.clearUserName();
       SessionManager.clearIpAddress();
       SessionManager.clearProxyUserName();
+      SessionManager.clearForwardedAddresses();
     }
   }
 

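As a stand-alone illustration of the header handling added here (the header value below is made up, not taken from the patch): the X-Forwarded-For value is split on commas only, so any whitespace after a comma is retained in the resulting entries.

import java.util.Arrays;
import java.util.List;

public class XForwardedForSplitSketch {
  public static void main(String[] args) {
    // Example header a proxy chain might send.
    String forwardedFor = "10.0.0.1, 192.168.1.7";
    List<String> forwardedAddresses = Arrays.asList(forwardedFor.split(","));
    // Prints: [10.0.0.1,  192.168.1.7] -- the second entry keeps its leading space.
    System.out.println(forwardedAddresses);
  }
}
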
http://git-wip-us.apache.org/repos/asf/hive/blob/833a7d15/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
index 630cfc9..1740079 100644
--- a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
+++ b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java
@@ -73,7 +73,7 @@ public abstract class ThriftCLIServiceTest {
     stopHiveServer2();
   }
 
-  protected static void startHiveServer2WithConf(HiveConf hiveConf) throws Exception {
+  static void startHiveServer2WithConf(HiveConf hiveConf) throws Exception {
     hiveServer2.init(hiveConf);
     // Start HiveServer2 with given config
     // Fail if server doesn't start
@@ -94,7 +94,7 @@ public abstract class ThriftCLIServiceTest {
     }
   }
 
-  protected static ThriftCLIServiceClient getServiceClientInternal() {
+  static ThriftCLIServiceClient getServiceClientInternal() {
     for (Service service : hiveServer2.getServices()) {
       if (service instanceof ThriftBinaryCLIService) {
         return new ThriftCLIServiceClient((ThriftBinaryCLIService) service);